xref: /freebsd/sys/netinet/sctp_indata.c (revision 70ed590b393173d4ea697be2a27054ed171f0c1a)
/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $	 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>

/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it) for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-to-many socket,
	 * since sb_cc is the count that everyone has put up. When we
	 * rewrite sctp_soreceive we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * take out what has NOT yet been put on the socket queue but
	 * which we still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);

	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to control overhead, reduce it
	 * to 1, even if it is 0. SWS avoidance engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
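
/*
 * Worked example of the calculation above (an illustrative sketch with
 * made-up numbers, not part of the original source): assume
 * SCTP_SB_LIMIT_RCV() is 64000 bytes, 8000 bytes already sit in the
 * socket buffer, 2000 bytes wait on the reassembly queue, 1000 bytes
 * wait on the stream queues, and my_rwnd_control_len is 400.
 *
 *	calc  = sctp_sbspace()           = 64000 - 8000 = 56000
 *	calc -= size_on_reasm_queue      = 56000 - 2000 = 54000
 *	calc -= size_on_all_streams      = 54000 - 1000 = 53000
 *	calc -= my_rwnd_control_len      = 53000 -  400 = 52600
 *
 * Only if the control overhead drags calc below my_rwnd_control_len is
 * the advertised window clamped to 1, keeping SWS avoidance engaged.
 */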


/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}
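
/*
 * Typical use of sctp_build_readq_entry() (an illustrative sketch, not
 * code from this file; tsn, ppid, strmno, strmseq, rcv_flags and m are
 * placeholder names):
 *
 *	control = sctp_build_readq_entry(stcb, net, tsn, ppid,
 *	    stcb->asoc.context, strmno, strmseq, rcv_flags, m);
 *	if (control == NULL) {
 *		... allocation failed, drop or defer the data ...
 *	} else {
 *		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
 *		    &stcb->sctp_socket->so_rcv, 1,
 *		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
 *	}
 */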

/*
 * Build out our readq entry based on the incoming chunk.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}

	ret = sctp_get_mbuf_for_msg(len,
	    0, M_DONTWAIT, 1, MT_DATA);

	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		/* copy only the struct; len includes the cmsg header */
		memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	SCTP_BUF_LEN(ret) = cmh->cmsg_len;
	return (ret);
}
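
/*
 * What the mbuf built above looks like to a receiver (an illustrative
 * userland sketch, not part of this file): after recvmsg() the control
 * data can be walked with the standard CMSG macros.
 *
 *	struct cmsghdr *cmsg;
 *	struct sctp_sndrcvinfo *info;
 *
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
 *	    cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if (cmsg->cmsg_level == IPPROTO_SCTP &&
 *		    cmsg->cmsg_type == SCTP_SNDRCV) {
 *			info = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
 *			... use info->sinfo_stream, info->sinfo_ssn, ...
 *		}
 *	}
 */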

char *
sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
    int *control_len,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	char *buf;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}
	SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
	if (buf == NULL) {
		/* No space */
		return (buf);
	}
	/* We need a CMSG header followed by the struct */
	cmh = (struct cmsghdr *)buf;
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		/* copy only the struct; len includes the cmsg header */
		memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	*control_len = len;
	return (buf);
}

static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i;
	int fnd = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
		panic("Things are really messed up now!!");
#endif
	}
	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; (compare_with_wrap(i, asoc->mapping_array_base_tsn, MAX_TSN) ||
		    (i == asoc->mapping_array_base_tsn)); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
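
/*
 * Example of the gap computed above (illustrative values): with
 * mapping_array_base_tsn = 0x1000 and tsn = 0x1005,
 * SCTP_CALC_TSN_TO_GAP() yields gap = 5, i.e. bit 5 in the mapping
 * arrays. The macro also handles wrap: base 0xfffffffe and
 * tsn 0x00000001 give gap = 3, since TSNs live in mod-2^32 space.
 */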


/*
 * We are delivering currently from the reassembly queue. We must continue
 * to deliver until we either: 1) run out of space, 2) run out of sequential
 * TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;

	struct sctp_queued_to_read *control, *ctl, *ctlat;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket above is long gone or going... */
abandon:
		asoc->fragmented_delivery_inprogress = 0;
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		while (chk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk);
			/* sa_ignore FREED_MEMORY */
			chk = TAILQ_FIRST(&asoc->reasmqueue);
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	do {
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		if (chk == NULL) {
			return;
		}
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream,
			 * and not unordered, so we must wait.
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {

			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end,
			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong, either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is an EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was an EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");

#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* pull it off; we delivered it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now let's see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			ctl = TAILQ_FIRST(&strm->inqueue);
			if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
				while (ctl != NULL) {
					/* Deliver more if we can. */
					if (nxt_todel == ctl->sinfo_ssn) {
						ctlat = TAILQ_NEXT(ctl, next);
						TAILQ_REMOVE(&strm->inqueue, ctl, next);
						asoc->size_on_all_streams -= ctl->length;
						sctp_ucount_decr(asoc->cnt_on_all_streams);
						strm->last_sequence_delivered++;
						sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    ctl,
						    &stcb->sctp_socket->so_rcv, 1,
						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
						ctl = ctlat;
					} else {
						break;
					}
					nxt_todel = strm->last_sequence_delivered + 1;
				}
			}
			break;
		}
		/* sa_ignore FREED_MEMORY */
		chk = TAILQ_FIRST(&asoc->reasmqueue);
	} while (chk);
}
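
/*
 * Illustrative walk-through of the delivery loop above (a sketch, not
 * new logic). Suppose tsn_last_delivered is 99 and the reassembly queue
 * holds:
 *
 *	TSN 100  FIRST_FRAG  -> readq entry built, partial delivery starts
 *	TSN 101  (middle)    -> appended to control_pdapi
 *	TSN 102  LAST_FRAG   -> appended; fragmented_delivery_inprogress is
 *	                        cleared and the stream queue is re-checked
 *	TSN 104  FIRST_FRAG  -> not delivered: TSN 103 is missing, so the
 *	                        tsn_last_delivered + 1 test fails
 */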

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains: what to do when the socket buffer is FULL?
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 *
	 */
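	/*
	 * Example of the wrap-aware comparison used below (illustrative
	 * only): with 16-bit SSNs, compare_with_wrap(5, 65530, MAX_SEQ)
	 * is true, i.e. SSN 5 is "after" SSN 65530, because the forward
	 * distance from 65530 to 5 (11) is much less than half the
	 * sequence space. A plain "5 > 65530" would get this wrong.
	 */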
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *oper;

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream,
	    (uint32_t) strm->last_sequence_delivered,
	    (uint32_t) nxt_todel);
	if (compare_with_wrap(strm->last_sequence_delivered,
	    control->sinfo_ssn, MAX_SEQ) ||
	    (strm->last_sequence_delivered == control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (sizeof(uint32_t) * 3);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
			ippp++;
			*ippp = control->sinfo_tsn;
			ippp++;
			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

		*abort_flag = 1;
		return;

	}
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY - it won't be queued if it can be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;

		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
		control = TAILQ_FIRST(&strm->inqueue);
		while (control != NULL) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				at = TAILQ_NEXT(control, next);
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_NOT_LOCKED);
				control = at;
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if ((compare_with_wrap(asoc->cumulative_tsn,
		    control->sinfo_tsn, MAX_TSN)) ||
		    (control->sinfo_tsn == asoc->cumulative_tsn)) {
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (compare_with_wrap(at->sinfo_ssn,
				    control->sinfo_ssn, MAX_SEQ)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, he sent me a duplicate stream
					 * seq number.
					 */
					/*
					 * I guess I will just free this new
					 * guy; should we abort too? FIX ME
					 * MAYBE? Or it COULD be that the
					 * SSNs have wrapped. Maybe I should
					 * compare to TSN somehow... sigh,
					 * for now just blow away the chunk!
					 */

					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom)
						sctp_free_remote_addr(control->whoFrom);
					control->whoFrom = NULL;
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}

/*
 * Returns two things: you get the total size of the deliverable parts of
 * the first fragmented message on the reassembly queue, and you get a 1
 * back if all of the message is ready or a 0 back if the message is still
 * incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t *t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	while (chk) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
		chk = TAILQ_NEXT(chk, sctp_next);
	}
	return (0);
}
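
/*
 * Example of the check above (illustrative): with TSNs 10 (FIRST),
 * 11 (middle) and 13 (LAST) queued, the walk stops at the 11 -> 13 gap
 * and returns 0, with *t_size holding the sizes of 10 and 11. Once
 * TSN 12 arrives, the walk reaches the LAST_FRAG and returns 1 with
 * the full message size.
 */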

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize, pd_point;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep, the first one is here and it's ok to
			 * deliver, but should we?
			 */
			if (stcb->sctp_socket) {
				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
				    stcb->sctp_ep->partial_delivery_point);
			} else {
				pd_point = stcb->sctp_ep->partial_delivery_point;
			}
			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {

				/*
				 * Yes, we setup to start reception by
				 * backing down the TSN just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery... but it won't know to
		 * go back and call itself again... we do that here with
		 * the goto doit_again.
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our fragmented delivery, could be more
			 * waiting?
			 */
			goto doit_again;
		}
	}
}
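
/*
 * Example of the pd_point decision above (illustrative, made-up
 * numbers): with a 64000-byte receive buffer and a
 * partial_delivery_point of 4096, pd_point = min(64000, 4096) = 4096.
 * A 3000-byte incomplete message stays queued; once 4096 or more bytes
 * of it are deliverable, or the whole message is present,
 * sctp_service_reassembly() is started.
 */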

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
	u_char last_flags;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery inprogress,
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);

				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (sizeof(uint32_t) * 3);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment NOT a FIRST
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (3 * sizeof(uint32_t));
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
					    chk->rec.data.stream_number,
					    asoc->str_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (sizeof(uint32_t) * 3);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
					    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
					    chk->rec.data.stream_seq,
					    asoc->ssn_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(at->rec.data.TSN_seq,
		    chk->rec.data.TSN_seq, MAX_TSN)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, he sent me a duplicate TSN */
			/*
			 * I guess I will just free this new guy; should we
			 * abort too? FIX ME MAYBE? Or it COULD be that the
			 * SSNs have wrapped. Maybe I should compare to TSN
			 * somehow... sigh, for now just blow away the
			 * chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk);
			return;
		} else {
			last_flags = at->rec.data.rcv_flags;
			last_tsn = at->rec.data.TSN_seq;
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok, the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    prev->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct SSN here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    prev->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok, the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_MIDDLE_FRAG ||
				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    next->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct SSN here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    next->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}
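
/*
 * Summary of the neighbor audits above (restating the checks, not new
 * logic): with prev = TSN-1 and next = TSN+1 already on the queue,
 *
 *	prev is FIRST or MIDDLE -> chk must be MIDDLE or LAST, on the
 *	                           same stream and (if ordered) same SSN
 *	prev is LAST            -> chk must be a FIRST
 *	next is FIRST           -> chk must be a LAST
 *	next is MIDDLE or LAST  -> chk must be FIRST or MIDDLE, on the
 *	                           same stream and (if ordered) same SSN
 *
 * Any other combination is a protocol violation and aborts the
 * association.
 */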

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is
 * not stuffing us full of bad packet fragments. A broken peer could also
 * do this, but that is doubtful. It is too bad I must worry about evil
 * crackers... sigh :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(TSN_seq,
		    at->rec.data.TSN_seq, MAX_TSN)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok, this guy belongs next to a
					 * guy that is NOT last; it should
					 * be a middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since it's a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than the new chunk, but does
			 * it need to be right before it?
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, it better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}
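
/*
 * Example of the predicate above (illustrative): if TSN 50 sits on the
 * queue as a LAST fragment, an arriving unfragmented chunk with TSN 51
 * is fine (returns 0). If TSN 50 were a MIDDLE fragment instead, TSN 51
 * would have to continue that message, so the unfragmented chunk is
 * rejected (returns 1) as a likely forgery.
 */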


static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
	/* Process a data chunk */
	struct sctp_tmit_chunk *chk;
	uint32_t tsn, gap;
	struct mbuf *dmbuf;
	int indx, the_len;
	int need_reasm_check = 0;
	uint16_t strmno, strmseq;
	struct mbuf *oper;
	struct sctp_queued_to_read *control;
	int ordered;
	uint32_t protocol_id;
	uint8_t chunk_flags;
	struct sctp_stream_reset_list *liste;

	chk = NULL;
	tsn = ntohl(ch->dp.tsn);
	chunk_flags = ch->ch.chunk_flags;
	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
		asoc->send_sack = 1;
	}
	protocol_id = ch->dp.protocol_id;
	ordered = ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
	}
	if (stcb == NULL) {
		return (0);
	}
	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
	if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
	    asoc->cumulative_tsn == tsn) {
		/* It is a duplicate */
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/* Calculate the number of TSN's between the base and this TSN */
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
		/* Can't hold the bit in the mapping at max array, toss it */
		return (0);
	}
	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_expand_mapping_array(asoc, gap)) {
			/* Can't expand, drop it */
			return (0);
		}
	}
	if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
		*high_tsn = tsn;
	}
	/* See if we have received this one already */
	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
1517 	/*
1518 	 * Check to see about the GONE flag, duplicates would cause a sack
1519 	 * to be sent up above
1520 	 */
1521 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1522 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1523 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1524 	    ) {
1525 		/*
1526 		 * wait a minute, this guy is gone, there is no longer a
1527 		 * receiver. Send peer an ABORT!
1528 		 */
1529 		struct mbuf *op_err;
1530 
1531 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1532 		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
1533 		*abort_flag = 1;
1534 		return (0);
1535 	}
1536 	/*
1537 	 * Now before going further we see if there is room. If NOT then we
1538 	 * MAY let one through only IF this TSN is the one we are waiting
1539 	 * for on a partial delivery API.
1540 	 */
1541 
1542 	/* now do the tests */
1543 	if (((asoc->cnt_on_all_streams +
1544 	    asoc->cnt_on_reasm_queue +
1545 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1546 	    (((int)asoc->my_rwnd) <= 0)) {
1547 		/*
1548 		 * When we have NO room in the rwnd we check to make sure
1549 		 * the reader is doing its job...
1550 		 */
1551 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1552 			/* some to read, wake-up */
1553 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1554 			struct socket *so;
1555 
1556 			so = SCTP_INP_SO(stcb->sctp_ep);
1557 			atomic_add_int(&stcb->asoc.refcnt, 1);
1558 			SCTP_TCB_UNLOCK(stcb);
1559 			SCTP_SOCKET_LOCK(so, 1);
1560 			SCTP_TCB_LOCK(stcb);
1561 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1562 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1563 				/* assoc was freed while we were unlocked */
1564 				SCTP_SOCKET_UNLOCK(so, 1);
1565 				return (0);
1566 			}
1567 #endif
1568 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1569 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1570 			SCTP_SOCKET_UNLOCK(so, 1);
1571 #endif
1572 		}
1573 		/* now is it in the mapping array of what we have accepted? */
1574 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN) &&
1575 		    compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1576 			/* Nope, not in the valid range, dump it */
1577 			sctp_set_rwnd(stcb, asoc);
1578 			if ((asoc->cnt_on_all_streams +
1579 			    asoc->cnt_on_reasm_queue +
1580 			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1581 				SCTP_STAT_INCR(sctps_datadropchklmt);
1582 			} else {
1583 				SCTP_STAT_INCR(sctps_datadroprwnd);
1584 			}
1585 			indx = *break_flag;
1586 			*break_flag = 1;
1587 			return (0);
1588 		}
1589 	}
1590 	strmno = ntohs(ch->dp.stream_id);
1591 	if (strmno >= asoc->streamincnt) {
1592 		struct sctp_paramhdr *phdr;
1593 		struct mbuf *mb;
1594 
1595 		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1596 		    0, M_DONTWAIT, 1, MT_DATA);
1597 		if (mb != NULL) {
1598 			/* add some space up front so prepend will work well */
1599 			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1600 			phdr = mtod(mb, struct sctp_paramhdr *);
1601 			/*
1602 			 * Error causes are just params, and this one has
1603 			 * two back-to-back phdrs: one with the error type
1604 			 * and size, the other with the stream id and a rsvd
1605 			 */
1606 			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1607 			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1608 			phdr->param_length =
1609 			    htons(sizeof(struct sctp_paramhdr) * 2);
1610 			phdr++;
1611 			/* We insert the stream in the type field */
1612 			phdr->param_type = ch->dp.stream_id;
1613 			/* And set the length to 0 for the rsvd field */
1614 			phdr->param_length = 0;
1615 			sctp_queue_op_err(stcb, mb);
1616 		}
1617 		SCTP_STAT_INCR(sctps_badsid);
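		/*
		 * Wire view of the cause just built (illustration only,
		 * 8 bytes total):
		 *
		 *   bytes 0-1: cause code  = SCTP_CAUSE_INVALID_STREAM
		 *   bytes 2-3: cause len   = 8
		 *   bytes 4-5: stream id   (already in network order)
		 *   bytes 6-7: reserved    = 0
		 *
		 * i.e. the second paramhdr is reused as raw storage for
		 * the 16-bit stream id plus a zeroed reserved field.
		 */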
1618 		SCTP_TCB_LOCK_ASSERT(stcb);
1619 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1620 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1621 			asoc->highest_tsn_inside_nr_map = tsn;
1622 		}
1623 		if (tsn == (asoc->cumulative_tsn + 1)) {
1624 			/* Update cum-ack */
1625 			asoc->cumulative_tsn = tsn;
1626 		}
1627 		return (0);
1628 	}
1629 	/*
1630 	 * Before we continue let's validate that we are not being fooled by
1631 	 * an evil attacker. We can only have 4k chunks based on the TSN
1632 	 * spread allowed by the mapping array (512 * 8 bits), so there is no
1633 	 * way our stream sequence numbers could have wrapped. We of course
1634 	 * only validate the FIRST fragment, so the bit must be set.
1635 	 */
1636 	strmseq = ntohs(ch->dp.stream_sequence);
1637 #ifdef SCTP_ASOCLOG_OF_TSNS
1638 	SCTP_TCB_LOCK_ASSERT(stcb);
1639 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1640 		asoc->tsn_in_at = 0;
1641 		asoc->tsn_in_wrapped = 1;
1642 	}
1643 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1644 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1645 	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1646 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1647 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1648 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1649 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1650 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1651 	asoc->tsn_in_at++;
1652 #endif
1653 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1654 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1655 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1656 	    (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
1657 	    strmseq, MAX_SEQ) ||
1658 	    asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
1659 		/* The incoming sseq is behind where we last delivered? */
1660 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1661 		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1662 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1663 		    0, M_DONTWAIT, 1, MT_DATA);
1664 		if (oper) {
1665 			struct sctp_paramhdr *ph;
1666 			uint32_t *ippp;
1667 
1668 			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1669 			    (3 * sizeof(uint32_t));
1670 			ph = mtod(oper, struct sctp_paramhdr *);
1671 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1672 			ph->param_length = htons(SCTP_BUF_LEN(oper));
1673 			ippp = (uint32_t *) (ph + 1);
1674 			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1675 			ippp++;
1676 			*ippp = tsn;
1677 			ippp++;
1678 			*ippp = ((strmno << 16) | strmseq);
1679 
1680 		}
1681 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1682 		sctp_abort_an_association(stcb->sctp_ep, stcb,
1683 		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1684 		*abort_flag = 1;
1685 		return (0);
1686 	}
1687 	/************************************
1688 	 * From here down we may find ch-> invalid
1689 	 * so it's a good idea NOT to use it.
1690 	 *************************************/
1691 
1692 	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1693 	if (last_chunk == 0) {
1694 		dmbuf = SCTP_M_COPYM(*m,
1695 		    (offset + sizeof(struct sctp_data_chunk)),
1696 		    the_len, M_DONTWAIT);
1697 #ifdef SCTP_MBUF_LOGGING
1698 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1699 			struct mbuf *mat;
1700 
1701 			mat = dmbuf;
1702 			while (mat) {
1703 				if (SCTP_BUF_IS_EXTENDED(mat)) {
1704 					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1705 				}
1706 				mat = SCTP_BUF_NEXT(mat);
1707 			}
1708 		}
1709 #endif
1710 	} else {
1711 		/* We can steal the last chunk */
1712 		int l_len;
1713 
1714 		dmbuf = *m;
1715 		/* lop off the top part */
1716 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1717 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1718 			l_len = SCTP_BUF_LEN(dmbuf);
1719 		} else {
1720 			/*
1721 			 * need to count up the size; hopefully we do not
1722 			 * hit this too often :-0
1723 			 */
1724 			struct mbuf *lat;
1725 
1726 			l_len = 0;
1727 			lat = dmbuf;
1728 			while (lat) {
1729 				l_len += SCTP_BUF_LEN(lat);
1730 				lat = SCTP_BUF_NEXT(lat);
1731 			}
1732 		}
1733 		if (l_len > the_len) {
1734 			/* Trim the rounding bytes off the end too */
1735 			m_adj(dmbuf, -(l_len - the_len));
1736 		}
1737 	}
1738 	if (dmbuf == NULL) {
1739 		SCTP_STAT_INCR(sctps_nomem);
1740 		return (0);
1741 	}
1742 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1743 	    asoc->fragmented_delivery_inprogress == 0 &&
1744 	    TAILQ_EMPTY(&asoc->resetHead) &&
1745 	    ((ordered == 0) ||
1746 	    ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1747 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1748 		/* Candidate for express delivery */
1749 		/*
1750 		 * It's not fragmented, no PD-API is up, nothing is in the
1751 		 * delivery queue, it's un-ordered OR ordered and the next to
1752 		 * deliver AND nothing else is stuck on the stream queue,
1753 		 * and there is room for it in the socket buffer. Let's just
1754 		 * stuff it up the buffer....
1755 		 */
1756 
1757 		/* It would be nice to avoid this copy if we could :< */
1758 		sctp_alloc_a_readq(stcb, control);
1759 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1760 		    protocol_id,
1761 		    stcb->asoc.context,
1762 		    strmno, strmseq,
1763 		    chunk_flags,
1764 		    dmbuf);
1765 		if (control == NULL) {
1766 			goto failed_express_del;
1767 		}
1768 		sctp_add_to_readq(stcb->sctp_ep, stcb,
1769 		    control, &stcb->sctp_socket->so_rcv,
1770 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1771 
1772 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1773 			/* for ordered, bump what we delivered */
1774 			asoc->strmin[strmno].last_sequence_delivered++;
1775 		}
1776 		SCTP_STAT_INCR(sctps_recvexpress);
1777 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1778 			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1779 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1780 		}
1781 		control = NULL;
1782 
1783 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1784 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1785 			asoc->highest_tsn_inside_nr_map = tsn;
1786 		}
1787 		goto finish_express_del;
1788 	}
1789 failed_express_del:
1790 	/* If we reach here this is a new chunk */
1791 	chk = NULL;
1792 	control = NULL;
1793 	/* Express for fragmented delivery? */
1794 	if ((asoc->fragmented_delivery_inprogress) &&
1795 	    (stcb->asoc.control_pdapi) &&
1796 	    (asoc->str_of_pdapi == strmno) &&
1797 	    (asoc->ssn_of_pdapi == strmseq)
1798 	    ) {
1799 		control = stcb->asoc.control_pdapi;
1800 		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1801 			/* Can't be another first? */
1802 			goto failed_pdapi_express_del;
1803 		}
1804 		if (tsn == (control->sinfo_tsn + 1)) {
1805 			/* Yep, we can add it on */
1806 			int end = 0;
1807 			uint32_t cumack;
1808 
1809 			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1810 				end = 1;
1811 			}
1812 			cumack = asoc->cumulative_tsn;
1813 			if ((cumack + 1) == tsn)
1814 				cumack = tsn;
1815 
1816 			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1817 			    tsn,
1818 			    &stcb->sctp_socket->so_rcv)) {
1819 				SCTP_PRINTF("Append fails end:%d\n", end);
1820 				goto failed_pdapi_express_del;
1821 			}
1822 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1823 			if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1824 				asoc->highest_tsn_inside_nr_map = tsn;
1825 			}
1826 			SCTP_STAT_INCR(sctps_recvexpressm);
1827 			control->sinfo_tsn = tsn;
1828 			asoc->tsn_last_delivered = tsn;
1829 			asoc->fragment_flags = chunk_flags;
1830 			asoc->tsn_of_pdapi_last_delivered = tsn;
1831 			asoc->last_flags_delivered = chunk_flags;
1832 			asoc->last_strm_seq_delivered = strmseq;
1833 			asoc->last_strm_no_delivered = strmno;
1834 			if (end) {
1835 				/* clean up the flags and such */
1836 				asoc->fragmented_delivery_inprogress = 0;
1837 				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1838 					asoc->strmin[strmno].last_sequence_delivered++;
1839 				}
1840 				stcb->asoc.control_pdapi = NULL;
1841 				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1842 					/*
1843 					 * There could be another message
1844 					 * ready
1845 					 */
1846 					need_reasm_check = 1;
1847 				}
1848 			}
1849 			control = NULL;
1850 			goto finish_express_del;
1851 		}
1852 	}
1853 failed_pdapi_express_del:
1854 	control = NULL;
1855 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1856 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1857 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1858 			asoc->highest_tsn_inside_nr_map = tsn;
1859 		}
1860 	} else {
1861 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1862 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1863 			asoc->highest_tsn_inside_map = tsn;
1864 		}
1865 	}
1866 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1867 		sctp_alloc_a_chunk(stcb, chk);
1868 		if (chk == NULL) {
1869 			/* No memory so we drop the chunk */
1870 			SCTP_STAT_INCR(sctps_nomem);
1871 			if (last_chunk == 0) {
1872 				/* we copied it, free the copy */
1873 				sctp_m_freem(dmbuf);
1874 			}
1875 			return (0);
1876 		}
1877 		chk->rec.data.TSN_seq = tsn;
1878 		chk->no_fr_allowed = 0;
1879 		chk->rec.data.stream_seq = strmseq;
1880 		chk->rec.data.stream_number = strmno;
1881 		chk->rec.data.payloadtype = protocol_id;
1882 		chk->rec.data.context = stcb->asoc.context;
1883 		chk->rec.data.doing_fast_retransmit = 0;
1884 		chk->rec.data.rcv_flags = chunk_flags;
1885 		chk->asoc = asoc;
1886 		chk->send_size = the_len;
1887 		chk->whoTo = net;
1888 		atomic_add_int(&net->ref_count, 1);
1889 		chk->data = dmbuf;
1890 	} else {
1891 		sctp_alloc_a_readq(stcb, control);
1892 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1893 		    protocol_id,
1894 		    stcb->asoc.context,
1895 		    strmno, strmseq,
1896 		    chunk_flags,
1897 		    dmbuf);
1898 		if (control == NULL) {
1899 			/* No memory so we drop the chunk */
1900 			SCTP_STAT_INCR(sctps_nomem);
1901 			if (last_chunk == 0) {
1902 				/* we copied it, free the copy */
1903 				sctp_m_freem(dmbuf);
1904 			}
1905 			return (0);
1906 		}
1907 		control->length = the_len;
1908 	}
1909 
1910 	/* Mark it as received */
1911 	/* Now queue it where it belongs */
1912 	if (control != NULL) {
1913 		/* First a sanity check */
1914 		if (asoc->fragmented_delivery_inprogress) {
1915 			/*
1916 			 * Ok, we have a fragmented delivery in progress; if
1917 			 * this chunk is next to deliver OR belongs, in our
1918 			 * view, in the reassembly, the peer is evil or
1919 			 * broken.
1920 			 */
1921 			uint32_t estimate_tsn;
1922 
1923 			estimate_tsn = asoc->tsn_last_delivered + 1;
1924 			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1925 			    (estimate_tsn == control->sinfo_tsn)) {
1926 				/* Evil/Broken peer */
1927 				sctp_m_freem(control->data);
1928 				control->data = NULL;
1929 				if (control->whoFrom) {
1930 					sctp_free_remote_addr(control->whoFrom);
1931 					control->whoFrom = NULL;
1932 				}
1933 				sctp_free_a_readq(stcb, control);
1934 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1935 				    0, M_DONTWAIT, 1, MT_DATA);
1936 				if (oper) {
1937 					struct sctp_paramhdr *ph;
1938 					uint32_t *ippp;
1939 
1940 					SCTP_BUF_LEN(oper) =
1941 					    sizeof(struct sctp_paramhdr) +
1942 					    (3 * sizeof(uint32_t));
1943 					ph = mtod(oper, struct sctp_paramhdr *);
1944 					ph->param_type =
1945 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1946 					ph->param_length = htons(SCTP_BUF_LEN(oper));
1947 					ippp = (uint32_t *) (ph + 1);
1948 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1949 					ippp++;
1950 					*ippp = tsn;
1951 					ippp++;
1952 					*ippp = ((strmno << 16) | strmseq);
1953 				}
1954 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1955 				sctp_abort_an_association(stcb->sctp_ep, stcb,
1956 				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1957 
1958 				*abort_flag = 1;
1959 				return (0);
1960 			} else {
1961 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1962 					sctp_m_freem(control->data);
1963 					control->data = NULL;
1964 					if (control->whoFrom) {
1965 						sctp_free_remote_addr(control->whoFrom);
1966 						control->whoFrom = NULL;
1967 					}
1968 					sctp_free_a_readq(stcb, control);
1969 
1970 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1971 					    0, M_DONTWAIT, 1, MT_DATA);
1972 					if (oper) {
1973 						struct sctp_paramhdr *ph;
1974 						uint32_t *ippp;
1975 
1976 						SCTP_BUF_LEN(oper) =
1977 						    sizeof(struct sctp_paramhdr) +
1978 						    (3 * sizeof(uint32_t));
1979 						ph = mtod(oper,
1980 						    struct sctp_paramhdr *);
1981 						ph->param_type =
1982 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1983 						ph->param_length =
1984 						    htons(SCTP_BUF_LEN(oper));
1985 						ippp = (uint32_t *) (ph + 1);
1986 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1987 						ippp++;
1988 						*ippp = tsn;
1989 						ippp++;
1990 						*ippp = ((strmno << 16) | strmseq);
1991 					}
1992 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1993 					sctp_abort_an_association(stcb->sctp_ep,
1994 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1995 
1996 					*abort_flag = 1;
1997 					return (0);
1998 				}
1999 			}
2000 		} else {
2001 			/* No PDAPI running */
2002 			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
2003 				/*
2004 				 * Reassembly queue is NOT empty; validate
2005 				 * that this tsn does not need to be in the
2006 				 * reassembly queue. If it does then our peer
2007 				 * is broken or evil.
2008 				 */
2009 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2010 					sctp_m_freem(control->data);
2011 					control->data = NULL;
2012 					if (control->whoFrom) {
2013 						sctp_free_remote_addr(control->whoFrom);
2014 						control->whoFrom = NULL;
2015 					}
2016 					sctp_free_a_readq(stcb, control);
2017 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2018 					    0, M_DONTWAIT, 1, MT_DATA);
2019 					if (oper) {
2020 						struct sctp_paramhdr *ph;
2021 						uint32_t *ippp;
2022 
2023 						SCTP_BUF_LEN(oper) =
2024 						    sizeof(struct sctp_paramhdr) +
2025 						    (3 * sizeof(uint32_t));
2026 						ph = mtod(oper,
2027 						    struct sctp_paramhdr *);
2028 						ph->param_type =
2029 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2030 						ph->param_length =
2031 						    htons(SCTP_BUF_LEN(oper));
2032 						ippp = (uint32_t *) (ph + 1);
2033 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2034 						ippp++;
2035 						*ippp = tsn;
2036 						ippp++;
2037 						*ippp = ((strmno << 16) | strmseq);
2038 					}
2039 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2040 					sctp_abort_an_association(stcb->sctp_ep,
2041 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2042 
2043 					*abort_flag = 1;
2044 					return (0);
2045 				}
2046 			}
2047 		}
2048 		/* ok, if we reach here we have passed the sanity checks */
2049 		if (chunk_flags & SCTP_DATA_UNORDERED) {
2050 			/* queue directly into socket buffer */
2051 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2052 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2053 			    control,
2054 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2055 		} else {
2056 			/*
2057 			 * Special check for when streams are resetting. We
2058 			 * could be smarter about this and check the
2059 			 * actual stream to see if it is not being reset;
2060 			 * that way we would not create a HOLB when amongst
2061 			 * streams being reset and those not being reset.
2062 			 *
2063 			 * We take complete messages that have a stream reset
2064 			 * intervening (aka the TSN is after where our
2065 			 * cum-ack needs to be) off and put them on a
2066 			 * pending_reply_queue. The reassembly ones we do
2067 			 * not have to worry about since they are all sorted
2068 			 * and processed in TSN order. It is only the
2069 			 * singletons I must worry about.
2070 			 */
2071 			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2072 			    ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)))
2073 			    ) {
2074 				/*
2075 				 * yep, it's past where we need to reset... go
2076 				 * ahead and queue it.
2077 				 */
2078 				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2079 					/* first one on */
2080 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2081 				} else {
2082 					struct sctp_queued_to_read *ctlOn;
2083 					unsigned char inserted = 0;
2084 
2085 					ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
2086 					while (ctlOn) {
2087 						if (compare_with_wrap(control->sinfo_tsn,
2088 						    ctlOn->sinfo_tsn, MAX_TSN)) {
2089 							ctlOn = TAILQ_NEXT(ctlOn, next);
2090 						} else {
2091 							/* found it */
2092 							TAILQ_INSERT_BEFORE(ctlOn, control, next);
2093 							inserted = 1;
2094 							break;
2095 						}
2096 					}
2097 					if (inserted == 0) {
2098 						/*
2099 						 * not inserted before any
2100 						 * entry in the loop, so it
2101 						 * must go at the end.
2102 						 */
2103 						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2104 					}
2105 				}
2106 			} else {
2107 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2108 				if (*abort_flag) {
2109 					return (0);
2110 				}
2111 			}
2112 		}
2113 	} else {
2114 		/* Into the re-assembly queue */
2115 		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2116 		if (*abort_flag) {
2117 			/*
2118 			 * the assoc is now gone and chk was put onto the
2119 			 * reasm queue, which has all been freed.
2120 			 */
2121 			*m = NULL;
2122 			return (0);
2123 		}
2124 	}
2125 finish_express_del:
2126 	if (last_chunk) {
2127 		*m = NULL;
2128 	}
2129 	if (ordered) {
2130 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2131 	} else {
2132 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2133 	}
2134 	SCTP_STAT_INCR(sctps_recvdata);
2135 	/* Set it present please */
2136 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2137 		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2138 	}
2139 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2140 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2141 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2142 	}
2143 	/* check the special flag for stream resets */
2144 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2145 	    ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
2146 	    (asoc->cumulative_tsn == liste->tsn))
2147 	    ) {
2148 		/*
2149 		 * We have finished working through the backlogged TSNs; now
2150 		 * time to reset streams. 1: call reset function. 2: free
2151 		 * pending_reply space. 3: distribute any chunks in the
2152 		 * pending_reply_queue.
2153 		 */
2154 		struct sctp_queued_to_read *ctl;
2155 
2156 		sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2157 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2158 		SCTP_FREE(liste, SCTP_M_STRESET);
2159 		/* sa_ignore FREED_MEMORY */
2160 		liste = TAILQ_FIRST(&asoc->resetHead);
2161 		ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2162 		if (ctl && (liste == NULL)) {
2163 			/* All can be removed */
2164 			while (ctl) {
2165 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2166 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2167 				if (*abort_flag) {
2168 					return (0);
2169 				}
2170 				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2171 			}
2172 		} else if (ctl) {
2173 			/* more than one in queue */
2174 			while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
2175 				/*
2176 				 * if ctl->sinfo_tsn is <= liste->tsn we can
2177 				 * process it, which is the NOT of
2178 				 * ctl->sinfo_tsn > liste->tsn.
2179 				 */
2180 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2181 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2182 				if (*abort_flag) {
2183 					return (0);
2184 				}
2185 				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2186 			}
2187 		}
2188 		/*
2189 		 * Now service re-assembly to pick up anything that has been
2190 		 * held on the reassembly queue.
2191 		 */
2192 		sctp_deliver_reasm_check(stcb, asoc);
2193 		need_reasm_check = 0;
2194 	}
2195 	if (need_reasm_check) {
2196 		/* Another one waits? */
2197 		sctp_deliver_reasm_check(stcb, asoc);
2198 	}
2199 	return (1);
2200 }
2201 
2202 int8_t sctp_map_lookup_tab[256] = {
2203 	0, 1, 0, 2, 0, 1, 0, 3,
2204 	0, 1, 0, 2, 0, 1, 0, 4,
2205 	0, 1, 0, 2, 0, 1, 0, 3,
2206 	0, 1, 0, 2, 0, 1, 0, 5,
2207 	0, 1, 0, 2, 0, 1, 0, 3,
2208 	0, 1, 0, 2, 0, 1, 0, 4,
2209 	0, 1, 0, 2, 0, 1, 0, 3,
2210 	0, 1, 0, 2, 0, 1, 0, 6,
2211 	0, 1, 0, 2, 0, 1, 0, 3,
2212 	0, 1, 0, 2, 0, 1, 0, 4,
2213 	0, 1, 0, 2, 0, 1, 0, 3,
2214 	0, 1, 0, 2, 0, 1, 0, 5,
2215 	0, 1, 0, 2, 0, 1, 0, 3,
2216 	0, 1, 0, 2, 0, 1, 0, 4,
2217 	0, 1, 0, 2, 0, 1, 0, 3,
2218 	0, 1, 0, 2, 0, 1, 0, 7,
2219 	0, 1, 0, 2, 0, 1, 0, 3,
2220 	0, 1, 0, 2, 0, 1, 0, 4,
2221 	0, 1, 0, 2, 0, 1, 0, 3,
2222 	0, 1, 0, 2, 0, 1, 0, 5,
2223 	0, 1, 0, 2, 0, 1, 0, 3,
2224 	0, 1, 0, 2, 0, 1, 0, 4,
2225 	0, 1, 0, 2, 0, 1, 0, 3,
2226 	0, 1, 0, 2, 0, 1, 0, 6,
2227 	0, 1, 0, 2, 0, 1, 0, 3,
2228 	0, 1, 0, 2, 0, 1, 0, 4,
2229 	0, 1, 0, 2, 0, 1, 0, 3,
2230 	0, 1, 0, 2, 0, 1, 0, 5,
2231 	0, 1, 0, 2, 0, 1, 0, 3,
2232 	0, 1, 0, 2, 0, 1, 0, 4,
2233 	0, 1, 0, 2, 0, 1, 0, 3,
2234 	0, 1, 0, 2, 0, 1, 0, 8
2235 };
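/*
 * A minimal sketch (illustration only, not compiled) of what the table
 * above encodes: sctp_map_lookup_tab[b] is the number of consecutive 1
 * bits in b, counting up from the least significant bit.
 *
 *	static int
 *	trailing_ones(uint8_t b)
 *	{
 *		int n = 0;
 *
 *		while (b & 1) {
 *			n++;
 *			b >>= 1;
 *		}
 *		return (n);
 *	}
 *
 * E.g. trailing_ones(0x17) == 3 == sctp_map_lookup_tab[0x17], and 0xff
 * maps to 8. The slide code below uses the table to advance the cum-ack
 * over whole bytes of received TSNs without testing individual bits.
 */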
2236 
2237 
2238 void
2239 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2240 {
2241 	/*
2242 	 * Now we also need to check the mapping array in a couple of ways:
2243 	 * 1) Did we move the cum-ack point? 2) Can we slide the array down?
2244 	 */
2245 	struct sctp_association *asoc;
2246 	int at;
2247 	int slide_from, slide_end, lgap, distance;
2248 
2249 	/* EY nr_mapping array variables */
2250 	/* int nr_at; */
2251 	/* int nr_last_all_ones = 0; */
2252 	/* int nr_slide_from, nr_slide_end, nr_lgap, nr_distance; */
2253 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2254 
2255 	asoc = &stcb->asoc;
2256 	at = 0;
2257 
2258 	old_cumack = asoc->cumulative_tsn;
2259 	old_base = asoc->mapping_array_base_tsn;
2260 	old_highest = asoc->highest_tsn_inside_map;
2261 	/*
2262 	 * We could probably improve this a small bit by calculating the
2263 	 * offset of the current cum-ack as the starting point.
2264 	 */
2265 	at = 0;
2266 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2267 		if (asoc->nr_mapping_array[slide_from] == 0xff) {
2268 			at += 8;
2269 		} else {
2270 			/* there is a 0 bit */
2271 			at += sctp_map_lookup_tab[asoc->nr_mapping_array[slide_from]];
2272 			break;
2273 		}
2274 	}
2275 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
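	/*
	 * Worked example (hypothetical values): with mapping_array_base_tsn
	 * = 100, nr_mapping_array[0] = 0xff and nr_mapping_array[1] = 0x07,
	 * the scan yields at = 8 + 3 = 11, so cumulative_tsn becomes
	 * 100 + 10 = 110, i.e. TSNs 100 through 110 are all present.
	 */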
2276 
2277 	if (compare_with_wrap(asoc->cumulative_tsn, asoc->highest_tsn_inside_map, MAX_TSN) &&
2278 	    compare_with_wrap(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
2279 #ifdef INVARIANTS
2280 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2281 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2282 #else
2283 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2284 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2285 		sctp_print_mapping_array(asoc);
2286 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2287 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2288 		}
2289 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2290 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2291 #endif
2292 	}
2293 	if (compare_with_wrap(asoc->highest_tsn_inside_nr_map,
2294 	    asoc->highest_tsn_inside_map,
2295 	    MAX_TSN)) {
2296 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2297 	} else {
2298 		highest_tsn = asoc->highest_tsn_inside_map;
2299 	}
2300 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2301 		/* The complete array was covered by a single FR */
2302 		/* highest becomes the cum-ack */
2303 		int clr, i;
2304 
2305 		/* clear the array */
2306 		clr = ((at + 7) >> 3);
2307 		if (clr > asoc->mapping_array_size) {
2308 			clr = asoc->mapping_array_size;
2309 		}
2310 		memset(asoc->mapping_array, 0, clr);
2311 		memset(asoc->nr_mapping_array, 0, clr);
2312 		for (i = 0; i < asoc->mapping_array_size; i++) {
2313 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2314 				printf("Error: mapping arrays not clean at clear\n");
2315 				sctp_print_mapping_array(asoc);
2316 			}
2317 		}
2318 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2319 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2320 	} else if (at >= 8) {
2321 		/* we can slide the mapping array down */
2322 		/* slide_from holds where we hit the first NON 0xff byte */
2323 
2324 		/*
2325 		 * now calculate the ceiling of the move using our highest
2326 		 * TSN value
2327 		 */
2328 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2329 		slide_end = (lgap >> 3);
2330 		if (slide_end < slide_from) {
2331 			sctp_print_mapping_array(asoc);
2332 #ifdef INVARIANTS
2333 			panic("impossible slide");
2334 #else
2335 			printf("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2336 			    lgap, slide_end, slide_from, at);
2337 			return;
2338 #endif
2339 		}
2340 		if (slide_end > asoc->mapping_array_size) {
2341 #ifdef INVARIANTS
2342 			panic("would overrun buffer");
2343 #else
2344 			printf("Gak, would have overrun map end:%d slide_end:%d\n",
2345 			    asoc->mapping_array_size, slide_end);
2346 			slide_end = asoc->mapping_array_size;
2347 #endif
2348 		}
2349 		distance = (slide_end - slide_from) + 1;
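		/*
		 * Worked example (hypothetical values): with base_tsn = 100,
		 * highest_tsn = 140 and the first non-0xff byte at
		 * slide_from = 2, we get lgap = 40, slide_end = 40 >> 3 = 5
		 * and distance = 4; bytes 2..5 are copied down to 0..3 and
		 * base_tsn advances by slide_from << 3 = 16, to 116.
		 */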
2350 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2351 			sctp_log_map(old_base, old_cumack, old_highest,
2352 			    SCTP_MAP_PREPARE_SLIDE);
2353 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2354 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2355 		}
2356 		if (distance + slide_from > asoc->mapping_array_size ||
2357 		    distance < 0) {
2358 			/*
2359 			 * Here we do NOT slide forward the array so that
2360 			 * hopefully when more data comes in to fill it up
2361 			 * we will be able to slide it forward. Really I
2362 			 * don't think this should happen :-0
2363 			 */
2364 
2365 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2366 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2367 				    (uint32_t) asoc->mapping_array_size,
2368 				    SCTP_MAP_SLIDE_NONE);
2369 			}
2370 		} else {
2371 			int ii;
2372 
2373 			for (ii = 0; ii < distance; ii++) {
2374 				asoc->mapping_array[ii] =
2375 				    asoc->mapping_array[slide_from + ii];
2376 				asoc->nr_mapping_array[ii] =
2377 				    asoc->nr_mapping_array[slide_from + ii];
2378 
2379 			}
2380 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2381 				asoc->mapping_array[ii] = 0;
2382 				asoc->nr_mapping_array[ii] = 0;
2383 			}
2384 			asoc->mapping_array_base_tsn += (slide_from << 3);
2385 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2386 				sctp_log_map(asoc->mapping_array_base_tsn,
2387 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2388 				    SCTP_MAP_SLIDE_RESULT);
2389 			}
2390 		}
2391 	}
2392 }
2393 
2394 
2395 void
2396 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap, int *abort_flag)
2397 {
2398 	struct sctp_association *asoc;
2399 	uint32_t highest_tsn;
2400 
2401 	asoc = &stcb->asoc;
2402 	if (compare_with_wrap(asoc->highest_tsn_inside_nr_map,
2403 	    asoc->highest_tsn_inside_map,
2404 	    MAX_TSN)) {
2405 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2406 	} else {
2407 		highest_tsn = asoc->highest_tsn_inside_map;
2408 	}
2409 
2410 	/*
2411 	 * Now we need to see if we need to queue a sack or just start the
2412 	 * timer (if allowed).
2413 	 */
2414 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2415 		/*
2416 		 * Ok, special case: in the SHUTDOWN-SENT state we make
2417 		 * sure the SACK timer is off and instead send a SHUTDOWN
2418 		 * and a SACK.
2419 		 */
2420 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2421 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2422 			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2423 		}
2424 		sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2425 		sctp_send_sack(stcb);
2426 	} else {
2427 		int is_a_gap;
2428 
2429 		/* is there a gap now ? */
2430 		is_a_gap = compare_with_wrap(highest_tsn, stcb->asoc.cumulative_tsn, MAX_TSN);
2431 
2432 		/*
2433 		 * CMT DAC algorithm: increase number of packets received
2434 		 * since last ack
2435 		 */
2436 		stcb->asoc.cmt_dac_pkts_rcvd++;
2437 
2438 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2439 							 * SACK */
2440 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2441 							 * longer is one */
2442 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2443 		    (is_a_gap) ||	/* is still a gap */
2444 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2445 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2446 		    ) {
2447 
2448 			if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off)) &&
2449 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2450 			    (stcb->asoc.send_sack == 0) &&
2451 			    (stcb->asoc.numduptsns == 0) &&
2452 			    (stcb->asoc.delayed_ack) &&
2453 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2454 
2455 				/*
2456 				 * CMT DAC algorithm: with CMT, delay acks
2457 				 * even in the face of reordering.
2458 				 *
2459 				 * Therefore, acks that do not have to be
2460 				 * sent for the above reasons will be
2461 				 * delayed. That is, acks that would have
2462 				 * been sent due to gap reports will be
2463 				 * delayed with DAC. Start the delayed ack
2464 				 * timer.
2465 				 */
2466 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2467 				    stcb->sctp_ep, stcb, NULL);
2468 			} else {
2469 				/*
2470 				 * Ok we must build a SACK since the timer
2471 				 * is pending, we got our first packet OR
2472 				 * there are gaps or duplicates.
2473 				 */
2474 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2475 				sctp_send_sack(stcb);
2476 			}
2477 		} else {
2478 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2479 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2480 				    stcb->sctp_ep, stcb, NULL);
2481 			}
2482 		}
2483 	}
2484 }
2485 
2486 void
2487 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2488 {
2489 	struct sctp_tmit_chunk *chk;
2490 	uint32_t tsize, pd_point;
2491 	uint16_t nxt_todel;
2492 
2493 	if (asoc->fragmented_delivery_inprogress) {
2494 		sctp_service_reassembly(stcb, asoc);
2495 	}
2496 	/* Can we proceed further, i.e. the PD-API is complete */
2497 	if (asoc->fragmented_delivery_inprogress) {
2498 		/* no */
2499 		return;
2500 	}
2501 	/*
2502 	 * Now is there some other chunk I can deliver from the reassembly
2503 	 * queue.
2504 	 */
2505 doit_again:
2506 	chk = TAILQ_FIRST(&asoc->reasmqueue);
2507 	if (chk == NULL) {
2508 		asoc->size_on_reasm_queue = 0;
2509 		asoc->cnt_on_reasm_queue = 0;
2510 		return;
2511 	}
2512 	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2513 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2514 	    ((nxt_todel == chk->rec.data.stream_seq) ||
2515 	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2516 		/*
2517 		 * Yep, the first one is here. We set up to start reception
2518 		 * by backing down the TSN just in case we can't deliver.
2519 		 */
2520 
2521 		/*
2522 		 * Before we start, though, either all of the message should
2523 		 * be here, or at least the partial delivery point's worth
2524 		 * of it, so that something can be delivered.
2525 		 */
2526 		if (stcb->sctp_socket) {
2527 			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
2528 			    stcb->sctp_ep->partial_delivery_point);
2529 		} else {
2530 			pd_point = stcb->sctp_ep->partial_delivery_point;
2531 		}
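		/*
		 * E.g. (hypothetical values) with a 64KB receive buffer and
		 * a partial_delivery_point of 4KB, pd_point is 4KB: a
		 * PD-API starts once the whole message, or at least 4KB of
		 * it, sits on the reassembly queue.
		 */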
2532 		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2533 			asoc->fragmented_delivery_inprogress = 1;
2534 			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2535 			asoc->str_of_pdapi = chk->rec.data.stream_number;
2536 			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2537 			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2538 			asoc->fragment_flags = chk->rec.data.rcv_flags;
2539 			sctp_service_reassembly(stcb, asoc);
2540 			if (asoc->fragmented_delivery_inprogress == 0) {
2541 				goto doit_again;
2542 			}
2543 		}
2544 	}
2545 }
2546 
2547 int
2548 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2549     struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2550     struct sctp_nets *net, uint32_t * high_tsn)
2551 {
2552 	struct sctp_data_chunk *ch, chunk_buf;
2553 	struct sctp_association *asoc;
2554 	int num_chunks = 0;	/* number of data chunks processed */
2555 	int stop_proc = 0;
2556 	int chk_length, break_flag, last_chunk;
2557 	int abort_flag = 0, was_a_gap = 0;
2558 	struct mbuf *m;
2559 
2560 	/* set the rwnd */
2561 	sctp_set_rwnd(stcb, &stcb->asoc);
2562 
2563 	m = *mm;
2564 	SCTP_TCB_LOCK_ASSERT(stcb);
2565 	asoc = &stcb->asoc;
2566 	if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2567 	    stcb->asoc.cumulative_tsn, MAX_TSN)) {
2568 		/* there was a gap before this data was processed */
2569 		was_a_gap = 1;
2570 	}
2571 	/*
2572 	 * setup where we got the last DATA packet from for any SACK that
2573 	 * may need to go out. Don't bump the net. This is done ONLY when a
2574 	 * chunk is assigned.
2575 	 */
2576 	asoc->last_data_chunk_from = net;
2577 
2578 	/*-
2579 	 * Now before we proceed we must figure out if this is a wasted
2580 	 * cluster... i.e. it is a small packet sent in and yet the driver
2581 	 * underneath allocated a full cluster for it. If so we must copy it
2582 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2583 	 * with cluster starvation. Note for __Panda__ we don't do this
2584 	 * since it has clusters all the way down to 64 bytes.
2585 	 */
2586 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2587 		/* we only handle mbufs that are singletons, not chains */
2588 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2589 		if (m) {
2590 			/* ok let's see if we can copy the data up */
2591 			caddr_t *from, *to;
2592 
2593 			/* get the pointers and copy */
2594 			to = mtod(m, caddr_t *);
2595 			from = mtod((*mm), caddr_t *);
2596 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2597 			/* copy the length and free up the old */
2598 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2599 			sctp_m_freem(*mm);
2600 			/* success, back copy */
2601 			*mm = m;
2602 		} else {
2603 			/* We are in trouble in the mbuf world .. yikes */
2604 			m = *mm;
2605 		}
2606 	}
2607 	/* get pointer to the first chunk header */
2608 	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2609 	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2610 	if (ch == NULL) {
2611 		return (1);
2612 	}
2613 	/*
2614 	 * process all DATA chunks...
2615 	 */
2616 	*high_tsn = asoc->cumulative_tsn;
2617 	break_flag = 0;
2618 	asoc->data_pkts_seen++;
2619 	while (stop_proc == 0) {
2620 		/* validate chunk length */
2621 		chk_length = ntohs(ch->ch.chunk_length);
2622 		if (length - *offset < chk_length) {
2623 			/* all done, mutilated chunk */
2624 			stop_proc = 1;
2625 			break;
2626 		}
2627 		if (ch->ch.chunk_type == SCTP_DATA) {
2628 			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2629 				/*
2630 				 * Need to send an abort since we had an
2631 				 * invalid data chunk.
2632 				 */
2633 				struct mbuf *op_err;
2634 
2635 				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2636 				    0, M_DONTWAIT, 1, MT_DATA);
2637 
2638 				if (op_err) {
2639 					struct sctp_paramhdr *ph;
2640 					uint32_t *ippp;
2641 
2642 					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2643 					    (2 * sizeof(uint32_t));
2644 					ph = mtod(op_err, struct sctp_paramhdr *);
2645 					ph->param_type =
2646 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2647 					ph->param_length = htons(SCTP_BUF_LEN(op_err));
2648 					ippp = (uint32_t *) (ph + 1);
2649 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2650 					ippp++;
2651 					*ippp = asoc->cumulative_tsn;
2652 
2653 				}
2654 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2655 				sctp_abort_association(inp, stcb, m, iphlen, sh,
2656 				    op_err, 0, net->port);
2657 				return (2);
2658 			}
2659 #ifdef SCTP_AUDITING_ENABLED
2660 			sctp_audit_log(0xB1, 0);
2661 #endif
2662 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2663 				last_chunk = 1;
2664 			} else {
2665 				last_chunk = 0;
2666 			}
2667 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2668 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2669 			    last_chunk)) {
2670 				num_chunks++;
2671 			}
2672 			if (abort_flag)
2673 				return (2);
2674 
2675 			if (break_flag) {
2676 				/*
2677 				 * Set because we are out of rwnd space and
2678 				 * have no drop report space left.
2679 				 */
2680 				stop_proc = 1;
2681 				break;
2682 			}
2683 		} else {
2684 			/* not a data chunk in the data region */
2685 			switch (ch->ch.chunk_type) {
2686 			case SCTP_INITIATION:
2687 			case SCTP_INITIATION_ACK:
2688 			case SCTP_SELECTIVE_ACK:
2689 			case SCTP_NR_SELECTIVE_ACK:	/* EY */
2690 			case SCTP_HEARTBEAT_REQUEST:
2691 			case SCTP_HEARTBEAT_ACK:
2692 			case SCTP_ABORT_ASSOCIATION:
2693 			case SCTP_SHUTDOWN:
2694 			case SCTP_SHUTDOWN_ACK:
2695 			case SCTP_OPERATION_ERROR:
2696 			case SCTP_COOKIE_ECHO:
2697 			case SCTP_COOKIE_ACK:
2698 			case SCTP_ECN_ECHO:
2699 			case SCTP_ECN_CWR:
2700 			case SCTP_SHUTDOWN_COMPLETE:
2701 			case SCTP_AUTHENTICATION:
2702 			case SCTP_ASCONF_ACK:
2703 			case SCTP_PACKET_DROPPED:
2704 			case SCTP_STREAM_RESET:
2705 			case SCTP_FORWARD_CUM_TSN:
2706 			case SCTP_ASCONF:
2707 				/*
2708 				 * Now, what do we do with KNOWN chunks that
2709 				 * are NOT in the right place?
2710 				 *
2711 				 * For now, I do nothing but ignore them. We
2712 				 * may later want to add sysctl stuff to
2713 				 * switch out and do either an ABORT() or
2714 				 * possibly process them.
2715 				 */
2716 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2717 					struct mbuf *op_err;
2718 
2719 					op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2720 					sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
2721 					return (2);
2722 				}
2723 				break;
2724 			default:
2725 				/* unknown chunk type, use bit rules */
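				/*
				 * The "bit rules" (RFC 4960, section 3.2):
				 * the two high-order bits of an unrecognized
				 * chunk type select the action:
				 *   00 - stop processing, discard the packet
				 *   01 - stop, discard, and report (0x40)
				 *   10 - skip this chunk, continue (0x80)
				 *   11 - skip, continue, and report
				 * E.g. a type of 0xC1 has both bits set, so
				 * an ERROR is queued and processing goes on.
				 */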
2726 				if (ch->ch.chunk_type & 0x40) {
2727 					/* Add a error report to the queue */
2728 					struct mbuf *merr;
2729 					struct sctp_paramhdr *phd;
2730 
2731 					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2732 					if (merr) {
2733 						phd = mtod(merr, struct sctp_paramhdr *);
2734 						/*
2735 						 * We cheat and use param
2736 						 * type since we did not
2737 						 * bother to define an error
2738 						 * cause struct. They are
2739 						 * the same basic format
2740 						 * with different names.
2741 						 */
2742 						phd->param_type =
2743 						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
2744 						phd->param_length =
2745 						    htons(chk_length + sizeof(*phd));
2746 						SCTP_BUF_LEN(merr) = sizeof(*phd);
2747 						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
2748 						    SCTP_SIZE32(chk_length),
2749 						    M_DONTWAIT);
2750 						if (SCTP_BUF_NEXT(merr)) {
2751 							sctp_queue_op_err(stcb, merr);
2752 						} else {
2753 							sctp_m_freem(merr);
2754 						}
2755 					}
2756 				}
2757 				if ((ch->ch.chunk_type & 0x80) == 0) {
2758 					/* discard the rest of this packet */
2759 					stop_proc = 1;
2760 				}	/* else skip this bad chunk and
2761 					 * continue... */
2762 				break;
2763 			}	/* switch on chunk type */
2764 		}
2765 		*offset += SCTP_SIZE32(chk_length);
2766 		if ((*offset >= length) || stop_proc) {
2767 			/* no more data left in the mbuf chain */
2768 			stop_proc = 1;
2769 			continue;
2770 		}
2771 		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2772 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2773 		if (ch == NULL) {
2774 			*offset = length;
2775 			stop_proc = 1;
2776 			break;
2777 
2778 		}
2779 	}			/* while */
2780 	if (break_flag) {
2781 		/*
2782 		 * we need to report rwnd overrun drops.
2783 		 */
2784 		sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2785 	}
2786 	if (num_chunks) {
2787 		/*
2788 		 * Did we get data, if so update the time for auto-close and
2789 		 * give peer credit for being alive.
2790 		 */
2791 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2792 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2793 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2794 			    stcb->asoc.overall_error_count,
2795 			    0,
2796 			    SCTP_FROM_SCTP_INDATA,
2797 			    __LINE__);
2798 		}
2799 		stcb->asoc.overall_error_count = 0;
2800 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2801 	}
2802 	/* now service all of the reassm queue if needed */
2803 	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2804 		sctp_service_queues(stcb, asoc);
2805 
2806 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2807 		/* Assure that we ack right away */
2808 		stcb->asoc.send_sack = 1;
2809 	}
2810 	/* Start a sack timer or QUEUE a SACK for sending */
2811 	if ((stcb->asoc.cumulative_tsn == stcb->asoc.highest_tsn_inside_map) &&
2812 	    (stcb->asoc.mapping_array[0] != 0xff)) {
2813 		if ((stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) ||
2814 		    (stcb->asoc.delayed_ack == 0) ||
2815 		    (stcb->asoc.numduptsns) ||
2816 		    (stcb->asoc.send_sack == 1)) {
2817 			if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2818 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2819 			}
2820 			sctp_send_sack(stcb);
2821 		} else {
2822 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2823 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2824 				    stcb->sctp_ep, stcb, NULL);
2825 			}
2826 		}
2827 	} else {
2828 		sctp_sack_check(stcb, was_a_gap, &abort_flag);
2829 	}
2830 	if (abort_flag)
2831 		return (2);
2832 
2833 	return (0);
2834 }
2835 
2836 static int
2837 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2838     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2839     int *num_frs,
2840     uint32_t * biggest_newly_acked_tsn,
2841     uint32_t * this_sack_lowest_newack,
2842     int *ecn_seg_sums)
2843 {
2844 	struct sctp_tmit_chunk *tp1;
2845 	unsigned int theTSN;
2846 	int j, wake_him = 0, circled = 0;
2847 
2848 	/* Recover the tp1 we last saw */
2849 	tp1 = *p_tp1;
2850 	if (tp1 == NULL) {
2851 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2852 	}
2853 	for (j = frag_strt; j <= frag_end; j++) {
2854 		theTSN = j + last_tsn;
2855 		while (tp1) {
2856 			if (tp1->rec.data.doing_fast_retransmit)
2857 				(*num_frs) += 1;
2858 
2859 			/*-
2860 			 * CMT: CUCv2 algorithm. For each TSN being
2861 			 * processed from the sent queue, track the
2862 			 * next expected pseudo-cumack, or
2863 			 * rtx_pseudo_cumack, if required. Separate
2864 			 * cumack trackers for first transmissions,
2865 			 * and retransmissions.
2866 			 */
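			/*
			 * E.g. (hypothetical): a chunk on its first
			 * transmission (snd_count == 1), seen while
			 * find_pseudo_cumack is set, becomes that
			 * destination's pseudo_cumack; when a SACK later
			 * covers it, new_pseudo_cumack is flagged below so
			 * the cwnd for that destination can be updated.
			 */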
2867 			if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2868 			    (tp1->snd_count == 1)) {
2869 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2870 				tp1->whoTo->find_pseudo_cumack = 0;
2871 			}
2872 			if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2873 			    (tp1->snd_count > 1)) {
2874 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2875 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2876 			}
2877 			if (tp1->rec.data.TSN_seq == theTSN) {
2878 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2879 					/*-
2880 					 * must be held until
2881 					 * cum-ack passes
2882 					 */
2883 					/*-
2884 					 * ECN Nonce: Add the nonce
2885 					 * value to the sender's
2886 					 * nonce sum
2887 					 */
2888 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2889 						/*-
2890 						 * If it is less than RESEND, it is
2891 						 * now no-longer in flight.
2892 						 * Higher values may already be set
2893 						 * via previous Gap Ack Blocks...
2894 						 * i.e. ACKED or RESEND.
2895 						 */
2896 						if (compare_with_wrap(tp1->rec.data.TSN_seq,
2897 						    *biggest_newly_acked_tsn, MAX_TSN)) {
2898 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2899 						}
2900 						/*-
2901 						 * CMT: SFR algo (and HTNA) - set
2902 						 * saw_newack to 1 for dest being
2903 						 * newly acked. update
2904 						 * this_sack_highest_newack if
2905 						 * appropriate.
2906 						 */
2907 						if (tp1->rec.data.chunk_was_revoked == 0)
2908 							tp1->whoTo->saw_newack = 1;
2909 
2910 						if (compare_with_wrap(tp1->rec.data.TSN_seq,
2911 						    tp1->whoTo->this_sack_highest_newack,
2912 						    MAX_TSN)) {
2913 							tp1->whoTo->this_sack_highest_newack =
2914 							    tp1->rec.data.TSN_seq;
2915 						}
2916 						/*-
2917 						 * CMT DAC algo: also update
2918 						 * this_sack_lowest_newack
2919 						 */
2920 						if (*this_sack_lowest_newack == 0) {
2921 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2922 								sctp_log_sack(*this_sack_lowest_newack,
2923 								    last_tsn,
2924 								    tp1->rec.data.TSN_seq,
2925 								    0,
2926 								    0,
2927 								    SCTP_LOG_TSN_ACKED);
2928 							}
2929 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2930 						}
2931 						/*-
2932 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2933 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2934 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2935 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2936 						 * Separate pseudo_cumack trackers for first transmissions and
2937 						 * retransmissions.
2938 						 */
2939 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2940 							if (tp1->rec.data.chunk_was_revoked == 0) {
2941 								tp1->whoTo->new_pseudo_cumack = 1;
2942 							}
2943 							tp1->whoTo->find_pseudo_cumack = 1;
2944 						}
2945 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2946 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2947 						}
2948 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2949 							if (tp1->rec.data.chunk_was_revoked == 0) {
2950 								tp1->whoTo->new_pseudo_cumack = 1;
2951 							}
2952 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2953 						}
2954 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2955 							sctp_log_sack(*biggest_newly_acked_tsn,
2956 							    last_tsn,
2957 							    tp1->rec.data.TSN_seq,
2958 							    frag_strt,
2959 							    frag_end,
2960 							    SCTP_LOG_TSN_ACKED);
2961 						}
2962 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2963 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2964 							    tp1->whoTo->flight_size,
2965 							    tp1->book_size,
2966 							    (uintptr_t) tp1->whoTo,
2967 							    tp1->rec.data.TSN_seq);
2968 						}
2969 						sctp_flight_size_decrease(tp1);
2970 						sctp_total_flight_decrease(stcb, tp1);
2971 
2972 						tp1->whoTo->net_ack += tp1->send_size;
2973 						if (tp1->snd_count < 2) {
2974 							/*-
2975 							 * True non-retransmitted chunk
2976 							 */
2977 							tp1->whoTo->net_ack2 += tp1->send_size;
2978 
2979 							/*-
2980 							 * update RTO too ?
2981 							 */
2982 							if (tp1->do_rtt) {
2983 								tp1->whoTo->RTO =
2984 								    sctp_calculate_rto(stcb,
2985 								    &stcb->asoc,
2986 								    tp1->whoTo,
2987 								    &tp1->sent_rcv_time,
2988 								    sctp_align_safe_nocopy);
2989 								tp1->do_rtt = 0;
2990 							}
2991 						}
2992 					}
2993 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2994 						(*ecn_seg_sums) += tp1->rec.data.ect_nonce;
2995 						(*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
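						/*
						 * The nonce sum is a 1-bit
						 * parity accumulated over
						 * the ECT nonces of the
						 * chunks covered so far; the
						 * mask keeps only the
						 * low-order bit.
						 */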
2996 						if (compare_with_wrap(tp1->rec.data.TSN_seq,
2997 						    stcb->asoc.this_sack_highest_gap,
2998 						    MAX_TSN)) {
2999 							stcb->asoc.this_sack_highest_gap =
3000 							    tp1->rec.data.TSN_seq;
3001 						}
3002 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3003 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3004 #ifdef SCTP_AUDITING_ENABLED
3005 							sctp_audit_log(0xB2,
3006 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3007 #endif
3008 						}
3009 					}
3010 					/*-
3011 					 * All chunks NOT UNSENT fall through here and are marked
3012 					 * (leave PR-SCTP ones that are to skip alone though)
3013 					 */
3014 					if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
3015 						tp1->sent = SCTP_DATAGRAM_MARKED;
3016 
3017 					if (tp1->rec.data.chunk_was_revoked) {
3018 						/* deflate the cwnd */
3019 						tp1->whoTo->cwnd -= tp1->book_size;
3020 						tp1->rec.data.chunk_was_revoked = 0;
3021 					}
3022 					/* NR Sack code here */
3023 					if (nr_sacking) {
3024 						if (tp1->data) {
3025 							/*
3026 							 * sa_ignore
3027 							 * NO_NULL_CHK
3028 							 */
3029 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3030 							sctp_m_freem(tp1->data);
3031 							tp1->data = NULL;
3032 						}
3033 						wake_him++;
3034 					}
3035 				}
3036 				break;
3037 			}	/* if (tp1->TSN_seq == theTSN) */
3038 			if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
3039 			    MAX_TSN))
3040 				break;
3041 
3042 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3043 			if ((tp1 == NULL) && (circled == 0)) {
3044 				circled++;
3045 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3046 			}
3047 		}		/* end while (tp1) */
3048 		if (tp1 == NULL) {
3049 			circled = 0;
3050 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3051 		}
3052 		/* In case the fragments were not in order we must reset */
3053 	}			/* end for (j = fragStart */
3054 	*p_tp1 = tp1;
3055 	return (wake_him);	/* Return value only used for nr-sack */
3056 }
3057 
3058 
3059 static int
3060 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3061     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3062     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3063     int num_seg, int num_nr_seg, int *ecn_seg_sums)
3064 {
3065 	struct sctp_gap_ack_block *frag, block;
3066 	struct sctp_tmit_chunk *tp1;
3067 	int i;
3068 	int num_frs = 0;
3069 	int chunk_freed;
3070 	int non_revocable;
3071 	uint16_t frag_strt, frag_end;
3072 	uint32_t last_frag_high;
3073 
3074 	tp1 = NULL;
3075 	last_frag_high = 0;
3076 	chunk_freed = 0;
3077 
3078 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3079 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3080 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3081 		*offset += sizeof(block);
3082 		if (frag == NULL) {
3083 			return (chunk_freed);
3084 		}
3085 		frag_strt = ntohs(frag->start);
3086 		frag_end = ntohs(frag->end);
3087 		/* some sanity checks on the fragment offsets */
3088 		if (frag_strt > frag_end) {
3089 			/* this one is malformed, skip */
3090 			continue;
3091 		}
3092 		if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
3093 		    MAX_TSN))
3094 			*biggest_tsn_acked = frag_end + last_tsn;
3095 
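		/*
		 * Gap ack block offsets are relative to the cumulative TSN
		 * ack point; e.g. (hypothetical values) with last_tsn = 1000
		 * and a block of [start = 2, end = 4], TSNs 1002 through
		 * 1004 are being acknowledged by this fragment.
		 */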
3096 		/* mark acked dgs and find out the highestTSN being acked */
3097 		if (tp1 == NULL) {
3098 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3099 			/* save the locations of the last frags */
3100 			last_frag_high = frag_end + last_tsn;
3101 		} else {
3102 			/*
3103 			 * now let's see if we need to reset the queue due to
3104 			 * an out-of-order SACK fragment
3105 			 */
3106 			if (compare_with_wrap(frag_strt + last_tsn,
3107 			    last_frag_high, MAX_TSN)) {
3108 				/*
3109 				 * if the new frag starts after the last TSN
3110 				 * frag covered, we are ok and this one is
3111 				 * beyond the last one
3112 				 */
3113 				;
3114 			} else {
3115 				/*
3116 				 * ok, they have reset us, so we need to
3117 				 * reset the queue; this will cause extra
3118 				 * hunting, but hey, they chose the
3119 				 * performance hit when they failed to order
3120 				 * their gaps.
3121 				 */
3122 				tp1 = TAILQ_FIRST(&asoc->sent_queue);
3123 			}
3124 			last_frag_high = frag_end + last_tsn;
3125 		}
3126 		if (i < num_seg) {
3127 			non_revocable = 0;
3128 		} else {
3129 			non_revocable = 1;
3130 		}
3131 		if (i == num_seg) {
3132 			tp1 = NULL;
3133 		}
3134 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3135 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3136 		    this_sack_lowest_newack, ecn_seg_sums)) {
3137 			chunk_freed = 1;
3138 		}
3139 	}
3140 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3141 		if (num_frs)
3142 			sctp_log_fr(*biggest_tsn_acked,
3143 			    *biggest_newly_acked_tsn,
3144 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3145 	}
3146 	return (chunk_freed);
3147 }
3148 
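/*
 * A TSN above the cum-ack that was ACKED by an earlier SACK but is not
 * covered by this one has been "revoked" by the peer and must be put back
 * in flight; e.g. (hypothetical) if TSN 105 was gap-acked previously, the
 * cum-ack is 100, and this SACK's gap blocks skip 105, then 105 goes back
 * to SCTP_DATAGRAM_SENT below.
 */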
3149 static void
3150 sctp_check_for_revoked(struct sctp_tcb *stcb,
3151     struct sctp_association *asoc, uint32_t cumack,
3152     uint32_t biggest_tsn_acked)
3153 {
3154 	struct sctp_tmit_chunk *tp1;
3155 	int tot_revoked = 0;
3156 
3157 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3158 	while (tp1) {
3159 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
3160 		    MAX_TSN)) {
3161 			/*
3162 			 * ok this guy is either ACKED or MARKED. If it is
3163 			 * ACKED it has been previously acked but not this
3164 			 * time, i.e. revoked. If it is MARKED it was ACK'ed
3165 			 * again.
3166 			 */
3167 			if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3168 			    MAX_TSN))
3169 				break;
3170 
3171 
3172 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3173 				/* it has been revoked */
3174 				tp1->sent = SCTP_DATAGRAM_SENT;
3175 				tp1->rec.data.chunk_was_revoked = 1;
3176 				/*
3177 				 * We must add this chunk back in to assure
3178 				 * timers and such get restarted.
3179 				 */
3180 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3181 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3182 					    tp1->whoTo->flight_size,
3183 					    tp1->book_size,
3184 					    (uintptr_t) tp1->whoTo,
3185 					    tp1->rec.data.TSN_seq);
3186 				}
3187 				sctp_flight_size_increase(tp1);
3188 				sctp_total_flight_increase(stcb, tp1);
3189 				/*
3190 				 * We inflate the cwnd to compensate for our
3191 				 * artificial inflation of the flight_size.
3192 				 */
3193 				tp1->whoTo->cwnd += tp1->book_size;
3194 				tot_revoked++;
3195 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3196 					sctp_log_sack(asoc->last_acked_seq,
3197 					    cumack,
3198 					    tp1->rec.data.TSN_seq,
3199 					    0,
3200 					    0,
3201 					    SCTP_LOG_TSN_REVOKED);
3202 				}
3203 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3204 				/* it has been re-acked in this SACK */
3205 				tp1->sent = SCTP_DATAGRAM_ACKED;
3206 			}
3207 		}
3208 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3209 			break;
3210 		tp1 = TAILQ_NEXT(tp1, sctp_next);
3211 	}
3212 	if (tot_revoked > 0) {
3213 		/*
3214 		 * Setup the ecn nonce re-sync point. We do this since once
3215 		 * data is revoked we begin to retransmit things, which do
3216 		 * NOT have the ECN bits set. This means we are now out of
3217 		 * sync and must wait until we get back in sync with the
3218 		 * peer to check ECN bits.
3219 		 */
3220 		tp1 = TAILQ_FIRST(&asoc->send_queue);
3221 		if (tp1 == NULL) {
3222 			asoc->nonce_resync_tsn = asoc->sending_seq;
3223 		} else {
3224 			asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
3225 		}
3226 		asoc->nonce_wait_for_ecne = 0;
3227 		asoc->nonce_sum_check = 0;
3228 	}
3229 }
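/*
 * Illustrative example (assumed traffic pattern, not from the code):
 * suppose TSN 7 was gap-acked by an earlier SACK (SCTP_DATAGRAM_ACKED)
 * but a later SACK with cumack 5 no longer reports it.  The loop above
 * then treats TSN 7 as revoked: it returns to SCTP_DATAGRAM_SENT, is
 * added back into flight_size/total_flight, and the cwnd is inflated
 * by its book_size so the artificial flight increase does not block
 * further sending.
 */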
3230 
3231 
3232 static void
3233 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3234     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3235 {
3236 	struct sctp_tmit_chunk *tp1;
3237 	int strike_flag = 0;
3238 	struct timeval now;
3239 	int tot_retrans = 0;
3240 	uint32_t sending_seq;
3241 	struct sctp_nets *net;
3242 	int num_dests_sacked = 0;
3243 
3244 	/*
3245 	 * select the sending_seq: this is either the next thing ready to be
3246 	 * sent but not yet transmitted, OR, the next seq we will assign.
3247 	 */
3248 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3249 	if (tp1 == NULL) {
3250 		sending_seq = asoc->sending_seq;
3251 	} else {
3252 		sending_seq = tp1->rec.data.TSN_seq;
3253 	}
3254 
3255 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3256 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3257 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3258 			if (net->saw_newack)
3259 				num_dests_sacked++;
3260 		}
3261 	}
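	/*
	 * Reading aid (annotation): a "mixed" SACK is one that reports new
	 * acks for more than one destination.  The DAC extra-strike below is
	 * applied only when num_dests_sacked == 1; e.g. with two paths both
	 * seeing new acks, num_dests_sacked is 2 and no additional strike is
	 * taken.
	 */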
3262 	if (stcb->asoc.peer_supports_prsctp) {
3263 		(void)SCTP_GETTIME_TIMEVAL(&now);
3264 	}
3265 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3266 	while (tp1) {
3267 		strike_flag = 0;
3268 		if (tp1->no_fr_allowed) {
3269 			/* this one had a timeout or something */
3270 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3271 			continue;
3272 		}
3273 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3274 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3275 				sctp_log_fr(biggest_tsn_newly_acked,
3276 				    tp1->rec.data.TSN_seq,
3277 				    tp1->sent,
3278 				    SCTP_FR_LOG_CHECK_STRIKE);
3279 		}
3280 		if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3281 		    MAX_TSN) ||
3282 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3283 			/* done */
3284 			break;
3285 		}
3286 		if (stcb->asoc.peer_supports_prsctp) {
3287 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3288 				/* Is it expired? */
3289 				/*
3290 				 * TODO sctp_constants.h needs alternative
3291 				 * time macros when _KERNEL is undefined.
3292 				 */
3293 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3296 					/* Yes so drop it */
3297 					if (tp1->data != NULL) {
3298 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3299 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3300 						    SCTP_SO_NOT_LOCKED);
3301 					}
3302 					tp1 = TAILQ_NEXT(tp1, sctp_next);
3303 					continue;
3304 				}
3305 			}
3306 		}
3307 		if (compare_with_wrap(tp1->rec.data.TSN_seq,
3308 		    asoc->this_sack_highest_gap, MAX_TSN)) {
3309 			/* we are beyond the tsn in the sack  */
3310 			break;
3311 		}
3312 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3313 			/* either a RESEND, ACKED, or MARKED */
3314 			/* skip */
3315 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3316 			continue;
3317 		}
3318 		/*
3319 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3320 		 */
3321 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3322 			/*
3323 			 * No new acks were received for data sent to this
3324 			 * dest. Therefore, according to the SFR algo for
3325 			 * CMT, no data sent to this dest can be marked for
3326 			 * FR using this SACK.
3327 			 */
3328 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3329 			continue;
3330 		} else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq,
3331 		    tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
3332 			/*
3333 			 * CMT: New acks were received for data sent to
3334 			 * this dest. But no new acks were seen for data
3335 			 * sent after tp1. Therefore, according to the SFR
3336 			 * algo for CMT, tp1 cannot be marked for FR using
3337 			 * this SACK. This step covers part of the DAC algo
3338 			 * and the HTNA algo as well.
3339 			 */
3340 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3341 			continue;
3342 		}
3343 		/*
3344 		 * Here we check to see if we have already done a FR
3345 		 * and if so we see if the biggest TSN we saw in the sack is
3346 		 * smaller than the recovery point. If so we don't strike
3347 		 * the tsn... otherwise we CAN strike the TSN.
3348 		 */
3349 		/*
3350 		 * @@@ JRI: Check for CMT if (accum_moved &&
3351 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3352 		 * 0)) {
3353 		 */
3354 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3355 			/*
3356 			 * Strike the TSN if in fast-recovery and cum-ack
3357 			 * moved.
3358 			 */
3359 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3360 				sctp_log_fr(biggest_tsn_newly_acked,
3361 				    tp1->rec.data.TSN_seq,
3362 				    tp1->sent,
3363 				    SCTP_FR_LOG_STRIKE_CHUNK);
3364 			}
3365 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3366 				tp1->sent++;
3367 			}
3368 			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3369 				/*
3370 				 * CMT DAC algorithm: If SACK flag is set to
3371 				 * 0, then lowest_newack test will not pass
3372 				 * because it would have been set to the
3373 				 * cumack earlier. If tp1 is not already to
3374 				 * be rtx'd, this is not a mixed sack, and
3375 				 * tp1 is not between two sacked TSNs, then
3376 				 * mark it one additional time; the SACK DAC
3377 				 * flag indicates that two packets have been
3378 				 * received after this missing TSN.
3379 				 */
3381 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3382 				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3383 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3384 						sctp_log_fr(16 + num_dests_sacked,
3385 						    tp1->rec.data.TSN_seq,
3386 						    tp1->sent,
3387 						    SCTP_FR_LOG_STRIKE_CHUNK);
3388 					}
3389 					tp1->sent++;
3390 				}
3391 			}
3392 		} else if ((tp1->rec.data.doing_fast_retransmit) && (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
3393 			/*
3394 			 * For those that have done a FR we must take
3395 			 * special consideration if we strike. I.e the
3396 			 * biggest_newly_acked must be higher than the
3397 			 * sending_seq at the time we did the FR.
3398 			 */
3399 			if (
3400 #ifdef SCTP_FR_TO_ALTERNATE
3401 			/*
3402 			 * If FR's go to new networks, then we must only do
3403 			 * this for singly homed asoc's. However if the FR's
3404 			 * go to the same network (Armando's work) then its
3405 			 * go to the same network (Armando's work) then it's
3406 			 */
3407 			    (asoc->numnets < 2)
3408 #else
3409 			    (1)
3410 #endif
3411 			    ) {
3412 
3413 				if ((compare_with_wrap(biggest_tsn_newly_acked,
3414 				    tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3415 				    (biggest_tsn_newly_acked ==
3416 				    tp1->rec.data.fast_retran_tsn)) {
3417 					/*
3418 					 * Strike the TSN, since this ack is
3419 					 * beyond where things were when we
3420 					 * did a FR.
3421 					 */
3422 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3423 						sctp_log_fr(biggest_tsn_newly_acked,
3424 						    tp1->rec.data.TSN_seq,
3425 						    tp1->sent,
3426 						    SCTP_FR_LOG_STRIKE_CHUNK);
3427 					}
3428 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3429 						tp1->sent++;
3430 					}
3431 					strike_flag = 1;
3432 					if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3433 						/*
3434 						 * CMT DAC algorithm: If
3435 						 * SACK flag is set to 0,
3436 						 * then lowest_newack test
3437 						 * will not pass because it
3438 						 * would have been set to
3439 						 * the cumack earlier. If
3440 						 * tp1 is not already to be
3441 						 * rtx'd, this is not a mixed
3442 						 * sack, and tp1 is not between
3443 						 * two sacked TSNs, then mark by
3444 						 * one more. NOTE that we
3445 						 * are marking by one
3446 						 * additional time since the
3447 						 * SACK DAC flag indicates
3448 						 * that two packets have
3449 						 * been received after this
3450 						 * missing TSN.
3451 						 */
3452 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3453 						    (num_dests_sacked == 1) &&
3454 						    compare_with_wrap(this_sack_lowest_newack,
3455 						    tp1->rec.data.TSN_seq, MAX_TSN)) {
3456 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3457 								sctp_log_fr(32 + num_dests_sacked,
3458 								    tp1->rec.data.TSN_seq,
3459 								    tp1->sent,
3460 								    SCTP_FR_LOG_STRIKE_CHUNK);
3461 							}
3462 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3463 								tp1->sent++;
3464 							}
3465 						}
3466 					}
3467 				}
3468 			}
3469 			/*
3470 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3471 			 * algo covers HTNA.
3472 			 */
3473 		} else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3474 		    biggest_tsn_newly_acked, MAX_TSN)) {
3475 			/*
3476 			 * We don't strike these: This is the  HTNA
3477 			 * We don't strike these: this is the HTNA
3478 			 * algorithm, i.e. we don't strike if our TSN is
3479 			 * larger than the Highest TSN Newly Acked.
3480 			;
3481 		} else {
3482 			/* Strike the TSN */
3483 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3484 				sctp_log_fr(biggest_tsn_newly_acked,
3485 				    tp1->rec.data.TSN_seq,
3486 				    tp1->sent,
3487 				    SCTP_FR_LOG_STRIKE_CHUNK);
3488 			}
3489 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3490 				tp1->sent++;
3491 			}
3492 			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3493 				/*
3494 				 * CMT DAC algorithm: If SACK flag is set to
3495 				 * 0, then lowest_newack test will not pass
3496 				 * because it would have been set to the
3497 				 * cumack earlier. If tp1 is not already to
3498 				 * be rtx'd, this is not a mixed sack, and
3499 				 * tp1 is not between two sacked TSNs, then mark by
3500 				 * one more. NOTE that we are marking by one
3501 				 * additional time since the SACK DAC flag
3502 				 * indicates that two packets have been
3503 				 * received after this missing TSN.
3504 				 */
3505 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3506 				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3507 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3508 						sctp_log_fr(48 + num_dests_sacked,
3509 						    tp1->rec.data.TSN_seq,
3510 						    tp1->sent,
3511 						    SCTP_FR_LOG_STRIKE_CHUNK);
3512 					}
3513 					tp1->sent++;
3514 				}
3515 			}
3516 		}
3517 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3518 			struct sctp_nets *alt;
3519 
3520 			/* fix counts and things */
3521 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3522 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3523 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3524 				    tp1->book_size,
3525 				    (uintptr_t) tp1->whoTo,
3526 				    tp1->rec.data.TSN_seq);
3527 			}
3528 			if (tp1->whoTo) {
3529 				tp1->whoTo->net_ack++;
3530 				sctp_flight_size_decrease(tp1);
3531 			}
3532 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3533 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3534 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3535 			}
3536 			/* add back to the rwnd */
3537 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3538 
3539 			/* remove from the total flight */
3540 			sctp_total_flight_decrease(stcb, tp1);
3541 
3542 			if ((stcb->asoc.peer_supports_prsctp) &&
3543 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3544 				/*
3545 				 * Has it been retransmitted tv_sec times?
3546 				 * (we store the retransmit limit there).
3547 				 */
3548 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3549 					/* Yes, so drop it */
3550 					if (tp1->data != NULL) {
3551 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3552 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3553 						    SCTP_SO_NOT_LOCKED);
3554 					}
3555 					/* Make sure to flag we had a FR */
3556 					tp1->whoTo->net_ack++;
3557 					tp1 = TAILQ_NEXT(tp1, sctp_next);
3558 					continue;
3559 				}
3560 			}
3561 			/* printf("OK, we are now ready to FR this guy\n"); */
3562 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3563 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3564 				    0, SCTP_FR_MARKED);
3565 			}
3566 			if (strike_flag) {
3567 				/* This is a subsequent FR */
3568 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3569 			}
3570 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3571 			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
3572 				/*
3573 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3574 				 * If CMT is being used, then pick dest with
3575 				 * largest ssthresh for any retransmission.
3576 				 */
3577 				tp1->no_fr_allowed = 1;
3578 				alt = tp1->whoTo;
3579 				/* sa_ignore NO_NULL_CHK */
3580 				if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
3581 					/*
3582 					 * JRS 5/18/07 - If CMT PF is on,
3583 					 * use the PF version of
3584 					 * find_alt_net()
3585 					 */
3586 					alt = sctp_find_alternate_net(stcb, alt, 2);
3587 				} else {
3588 					/*
3589 					 * JRS 5/18/07 - If only CMT is on,
3590 					 * use the CMT version of
3591 					 * find_alt_net()
3592 					 */
3593 					/* sa_ignore NO_NULL_CHK */
3594 					alt = sctp_find_alternate_net(stcb, alt, 1);
3595 				}
3596 				if (alt == NULL) {
3597 					alt = tp1->whoTo;
3598 				}
3599 				/*
3600 				 * CUCv2: If a different dest is picked for
3601 				 * the retransmission, then new
3602 				 * (rtx-)pseudo_cumack needs to be tracked
3603 				 * for orig dest. Let CUCv2 track new (rtx-)
3604 				 * pseudo-cumack always.
3605 				 */
3606 				if (tp1->whoTo) {
3607 					tp1->whoTo->find_pseudo_cumack = 1;
3608 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3609 				}
3610 			} else {/* CMT is OFF */
3611 
3612 #ifdef SCTP_FR_TO_ALTERNATE
3613 				/* Can we find an alternate? */
3614 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3615 #else
3616 				/*
3617 				 * default behavior is to NOT retransmit
3618 				 * FR's to an alternate. Armando Caro's
3619 				 * paper details why.
3620 				 */
3621 				alt = tp1->whoTo;
3622 #endif
3623 			}
3624 
3625 			tp1->rec.data.doing_fast_retransmit = 1;
3626 			tot_retrans++;
3627 			/* mark the sending seq for possible subsequent FR's */
3628 			/*
3629 			 * printf("Marking TSN for FR new value %x\n",
3630 			 * (uint32_t)tpi->rec.data.TSN_seq);
3631 			 */
3632 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3633 				/*
3634 				 * If the send queue is empty then it's
3635 				 * the next sequence number that will be
3636 				 * assigned, so we subtract one from this to
3637 				 * get the one we last sent.
3638 				 */
3639 				tp1->rec.data.fast_retran_tsn = sending_seq;
3640 			} else {
3641 				/*
3642 				 * If there are chunks on the send queue
3643 				 * (unsent data that has made it from the
3644 				 * stream queues but not out the door), we
3645 				 * take the first one (which will have the
3646 				 * lowest TSN) and subtract one to get the
3647 				 * one we last sent.
3648 				 */
3649 				struct sctp_tmit_chunk *ttt;
3650 
3651 				ttt = TAILQ_FIRST(&asoc->send_queue);
3652 				tp1->rec.data.fast_retran_tsn =
3653 				    ttt->rec.data.TSN_seq;
3654 			}
3655 
3656 			if (tp1->do_rtt) {
3657 				/*
3658 				 * this chunk had an RTO calculation pending on
3659 				 * it, cancel it
3660 				 */
3661 				tp1->do_rtt = 0;
3662 			}
3663 			if (alt != tp1->whoTo) {
3664 				/* yes, there is an alternate. */
3665 				sctp_free_remote_addr(tp1->whoTo);
3666 				/* sa_ignore FREED_MEMORY */
3667 				tp1->whoTo = alt;
3668 				atomic_add_int(&alt->ref_count, 1);
3669 			}
3670 		}
3671 		tp1 = TAILQ_NEXT(tp1, sctp_next);
3672 	}			/* while (tp1) */
3673 
3674 	if (tot_retrans > 0) {
3675 		/*
3676 		 * Setup the ecn nonce re-sync point. We do this since once
3677 		 * we go to FR something we introduce a Karn's rule scenario
3678 		 * and won't know the totals for the ECN bits.
3679 		 */
3680 		asoc->nonce_resync_tsn = sending_seq;
3681 		asoc->nonce_wait_for_ecne = 0;
3682 		asoc->nonce_sum_check = 0;
3683 	}
3684 }
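/*
 * Sketch of the strike ladder implemented above (a reading aid, not an
 * authoritative statement of the constants): each qualifying SACK
 * increments tp1->sent until it reaches SCTP_DATAGRAM_RESEND, at which
 * point the chunk is removed from flight accounting, the peer rwnd is
 * credited, an alternate destination may be chosen, and
 * fast_retran_tsn is recorded so later SACKs can tell whether a
 * further FR is justified.
 */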
3685 
3686 struct sctp_tmit_chunk *
3687 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3688     struct sctp_association *asoc)
3689 {
3690 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3691 	struct timeval now;
3692 	int now_filled = 0;
3693 
3694 	if (asoc->peer_supports_prsctp == 0) {
3695 		return (NULL);
3696 	}
3697 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3698 	while (tp1) {
3699 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3700 		    tp1->sent != SCTP_DATAGRAM_RESEND) {
3701 			/* no chance to advance, out of here */
3702 			break;
3703 		}
3704 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3705 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3706 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3707 				    asoc->advanced_peer_ack_point,
3708 				    tp1->rec.data.TSN_seq, 0, 0);
3709 			}
3710 		}
3711 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3712 			/*
3713 			 * We can't fwd-tsn past any that are reliable, i.e.
3714 			 * ones that are retransmitted until the asoc fails.
3715 			 */
3716 			break;
3717 		}
3718 		if (!now_filled) {
3719 			(void)SCTP_GETTIME_TIMEVAL(&now);
3720 			now_filled = 1;
3721 		}
3722 		tp2 = TAILQ_NEXT(tp1, sctp_next);
3723 		/*
3724 		 * now we have a chunk which is marked for another
3725 		 * retransmission to a PR-stream but has maybe run out of
3726 		 * chances already OR has been marked to skip now. Can we
3727 		 * skip it if it's a resend?
3728 		 */
3729 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3730 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3731 			/*
3732 			 * Now is this one marked for resend and its time is
3733 			 * now up?
3734 			 */
3735 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3736 				/* Yes so drop it */
3737 				if (tp1->data) {
3738 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3739 					    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3740 					    SCTP_SO_NOT_LOCKED);
3741 				}
3742 			} else {
3743 				/*
3744 				 * No, we are done when we hit one for resend
3745 				 * whose time has not expired.
3746 				 */
3747 				break;
3748 			}
3749 		}
3750 		/*
3751 		 * Ok, now if this chunk is marked to be dropped we can clean
3752 		 * up the chunk, advance our peer ack point and check
3753 		 * the next chunk.
3754 		 */
3755 		if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3756 			/* advance the PeerAckPoint forward */
3757 			if (compare_with_wrap(tp1->rec.data.TSN_seq,
3758 			    asoc->advanced_peer_ack_point,
3759 			    MAX_TSN)) {
3760 
3761 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3762 				a_adv = tp1;
3763 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3764 				/* No update but we do save the chk */
3765 				a_adv = tp1;
3766 			}
3767 		} else {
3768 			/*
3769 			 * If it is still in RESEND we can advance no
3770 			 * further
3771 			 */
3772 			break;
3773 		}
3774 		/*
3775 		 * If we hit here we just dumped tp1, move to next tsn on
3776 		 * sent queue.
3777 		 */
3778 		tp1 = tp2;
3779 	}
3780 	return (a_adv);
3781 }
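/*
 * Worked example (hypothetical TSNs): with cumack 10 and the sent
 * queue holding 11 (SCTP_FORWARD_TSN_SKIP), 12 (SKIP) and 13
 * (reliable), the loop advances advanced_peer_ack_point to 12 and
 * returns the chunk for TSN 12; the reliable TSN 13 stops the walk, so
 * a FWD-TSN built from this result tells the peer to behave as if it
 * had received everything up to 12.
 */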
3782 
3783 static int
3784 sctp_fs_audit(struct sctp_association *asoc)
3785 {
3786 	struct sctp_tmit_chunk *chk;
3787 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3788 	int entry_flight, entry_cnt, ret;
3789 
3790 	entry_flight = asoc->total_flight;
3791 	entry_cnt = asoc->total_flight_count;
3792 	ret = 0;
3793 
3794 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3795 		return (0);
3796 
3797 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3798 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3799 			printf("Chk TSN:%u size:%d inflight cnt:%d\n",
3800 			    chk->rec.data.TSN_seq,
3801 			    chk->send_size,
3802 			    chk->snd_count
3803 			    );
3804 			inflight++;
3805 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3806 			resend++;
3807 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3808 			inbetween++;
3809 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3810 			above++;
3811 		} else {
3812 			acked++;
3813 		}
3814 	}
3815 
3816 	if ((inflight > 0) || (inbetween > 0)) {
3817 #ifdef INVARIANTS
3818 		panic("Flight size-express incorrect?\n");
3819 #else
3820 		printf("asoc->total_flight:%d cnt:%d\n",
3821 		    entry_flight, entry_cnt);
3822 
3823 		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3824 		    inflight, inbetween, resend, above, acked);
3825 		ret = 1;
3826 #endif
3827 	}
3828 	return (ret);
3829 }
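/*
 * Annotation: the audit above is a consistency check only. It recounts
 * the sent queue by state and flags any chunk still counted as in
 * flight (or stuck in between states) when the caller believes the
 * flight size should be zero.  Callers such as the SACK handlers below
 * use a nonzero return to rebuild flight_size/total_flight from
 * scratch.
 */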
3830 
3831 
3832 static void
3833 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3834     struct sctp_association *asoc,
3835     struct sctp_nets *net,
3836     struct sctp_tmit_chunk *tp1)
3837 {
3838 	tp1->window_probe = 0;
3839 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3840 		/* TSN's been skipped; we do NOT move it back. */
3841 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3842 		    tp1->whoTo->flight_size,
3843 		    tp1->book_size,
3844 		    (uintptr_t) tp1->whoTo,
3845 		    tp1->rec.data.TSN_seq);
3846 		return;
3847 	}
3848 	/* First setup this by shrinking flight */
3849 	sctp_flight_size_decrease(tp1);
3850 	sctp_total_flight_decrease(stcb, tp1);
3851 	/* Now mark for resend */
3852 	tp1->sent = SCTP_DATAGRAM_RESEND;
3853 	asoc->sent_queue_retran_cnt++;
3854 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3855 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3856 		    tp1->whoTo->flight_size,
3857 		    tp1->book_size,
3858 		    (uintptr_t) tp1->whoTo,
3859 		    tp1->rec.data.TSN_seq);
3860 	}
3861 }
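/*
 * Example (assumed state, for illustration): a 1-byte chunk sent as a
 * window probe while peers_rwnd was 0.  Once a SACK reopens the
 * window, the caller finds the chunk via tp1->window_probe; this
 * routine removes it from flight and marks it SCTP_DATAGRAM_RESEND, so
 * the normal output path retransmits it as ordinary data.
 */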
3862 
3863 void
3864 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3865     uint32_t rwnd, int nonce_sum_flag, int *abort_now)
3866 {
3867 	struct sctp_nets *net;
3868 	struct sctp_association *asoc;
3869 	struct sctp_tmit_chunk *tp1, *tp2;
3870 	uint32_t old_rwnd;
3871 	int win_probe_recovery = 0;
3872 	int win_probe_recovered = 0;
3873 	int j, done_once = 0;
3874 
3875 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3876 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3877 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3878 	}
3879 	SCTP_TCB_LOCK_ASSERT(stcb);
3880 #ifdef SCTP_ASOCLOG_OF_TSNS
3881 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3882 	stcb->asoc.cumack_log_at++;
3883 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3884 		stcb->asoc.cumack_log_at = 0;
3885 	}
3886 #endif
3887 	asoc = &stcb->asoc;
3888 	old_rwnd = asoc->peers_rwnd;
3889 	if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
3890 		/* old ack */
3891 		return;
3892 	} else if (asoc->last_acked_seq == cumack) {
3893 		/* Window update sack */
3894 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3895 		    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3896 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3897 			/* SWS sender side engages */
3898 			asoc->peers_rwnd = 0;
3899 		}
3900 		if (asoc->peers_rwnd > old_rwnd) {
3901 			goto again;
3902 		}
3903 		return;
3904 	}
3905 	/* First setup for CC stuff */
3906 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3907 		net->prev_cwnd = net->cwnd;
3908 		net->net_ack = 0;
3909 		net->net_ack2 = 0;
3910 
3911 		/*
3912 		 * CMT: Reset CUC and Fast recovery algo variables before
3913 		 * SACK processing
3914 		 */
3915 		net->new_pseudo_cumack = 0;
3916 		net->will_exit_fast_recovery = 0;
3917 	}
3918 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3919 		uint32_t send_s;
3920 
3921 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3922 			tp1 = TAILQ_LAST(&asoc->sent_queue,
3923 			    sctpchunk_listhead);
3924 			send_s = tp1->rec.data.TSN_seq + 1;
3925 		} else {
3926 			send_s = asoc->sending_seq;
3927 		}
3928 		if ((cumack == send_s) ||
3929 		    compare_with_wrap(cumack, send_s, MAX_TSN)) {
3930 #ifndef INVARIANTS
3931 			struct mbuf *oper;
3932 
3933 #endif
3934 #ifdef INVARIANTS
3935 			panic("Impossible sack 1");
3936 #else
3937 
3938 			*abort_now = 1;
3939 			/* XXX */
3940 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3941 			    0, M_DONTWAIT, 1, MT_DATA);
3942 			if (oper) {
3943 				struct sctp_paramhdr *ph;
3944 				uint32_t *ippp;
3945 
3946 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3947 				    sizeof(uint32_t);
3948 				ph = mtod(oper, struct sctp_paramhdr *);
3949 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3950 				ph->param_length = htons(SCTP_BUF_LEN(oper));
3951 				ippp = (uint32_t *) (ph + 1);
3952 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3953 			}
3954 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3955 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
3956 			return;
3957 #endif
3958 		}
3959 	}
3960 	asoc->this_sack_highest_gap = cumack;
3961 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3962 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3963 		    stcb->asoc.overall_error_count,
3964 		    0,
3965 		    SCTP_FROM_SCTP_INDATA,
3966 		    __LINE__);
3967 	}
3968 	stcb->asoc.overall_error_count = 0;
3969 	if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
3970 		/* process the new consecutive TSN first */
3971 		tp1 = TAILQ_FIRST(&asoc->sent_queue);
3972 		while (tp1) {
3973 			tp2 = TAILQ_NEXT(tp1, sctp_next);
3974 			if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
3975 			    MAX_TSN) ||
3976 			    cumack == tp1->rec.data.TSN_seq) {
3977 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3978 					printf("Warning, an unsent is now acked?\n");
3979 				}
3980 				/*
3981 				 * ECN Nonce: Add the nonce to the sender's
3982 				 * nonce sum
3983 				 */
3984 				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
3985 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3986 					/*
3987 					 * If it is less than ACKED, it is
3988 					 * now no longer in flight. Higher
3989 					 * values may occur during marking
3990 					 */
3991 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3992 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3993 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3994 							    tp1->whoTo->flight_size,
3995 							    tp1->book_size,
3996 							    (uintptr_t) tp1->whoTo,
3997 							    tp1->rec.data.TSN_seq);
3998 						}
3999 						sctp_flight_size_decrease(tp1);
4000 						/* sa_ignore NO_NULL_CHK */
4001 						sctp_total_flight_decrease(stcb, tp1);
4002 					}
4003 					tp1->whoTo->net_ack += tp1->send_size;
4004 					if (tp1->snd_count < 2) {
4005 						/*
4006 						 * True non-retransmitted
4007 						 * chunk
4008 						 */
4009 						tp1->whoTo->net_ack2 +=
4010 						    tp1->send_size;
4011 
4012 						/* update RTO too? */
4013 						if (tp1->do_rtt) {
4014 							tp1->whoTo->RTO =
4015 							/*
4016 							 * sa_ignore
4017 							 * NO_NULL_CHK
4018 							 */
4019 							    sctp_calculate_rto(stcb,
4020 							    asoc, tp1->whoTo,
4021 							    &tp1->sent_rcv_time,
4022 							    sctp_align_safe_nocopy);
4023 							tp1->do_rtt = 0;
4024 						}
4025 					}
4026 					/*
4027 					 * CMT: CUCv2 algorithm. From the
4028 					 * cumack'd TSNs, for each TSN being
4029 					 * acked for the first time, set the
4030 					 * following variables for the
4031 					 * corresp destination.
4032 					 * new_pseudo_cumack will trigger a
4033 					 * cwnd update.
4034 					 * find_(rtx_)pseudo_cumack will
4035 					 * trigger search for the next
4036 					 * expected (rtx-)pseudo-cumack.
4037 					 */
4038 					tp1->whoTo->new_pseudo_cumack = 1;
4039 					tp1->whoTo->find_pseudo_cumack = 1;
4040 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4041 
4042 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4043 						/* sa_ignore NO_NULL_CHK */
4044 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4045 					}
4046 				}
4047 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4048 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4049 				}
4050 				if (tp1->rec.data.chunk_was_revoked) {
4051 					/* deflate the cwnd */
4052 					tp1->whoTo->cwnd -= tp1->book_size;
4053 					tp1->rec.data.chunk_was_revoked = 0;
4054 				}
4055 				tp1->sent = SCTP_DATAGRAM_ACKED;
4056 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4057 				if (tp1->data) {
4058 					/* sa_ignore NO_NULL_CHK */
4059 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4060 					sctp_m_freem(tp1->data);
4061 				}
4062 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4063 					sctp_log_sack(asoc->last_acked_seq,
4064 					    cumack,
4065 					    tp1->rec.data.TSN_seq,
4066 					    0,
4067 					    0,
4068 					    SCTP_LOG_FREE_SENT);
4069 				}
4070 				tp1->data = NULL;
4071 				asoc->sent_queue_cnt--;
4072 				sctp_free_a_chunk(stcb, tp1);
4073 				tp1 = tp2;
4074 			} else {
4075 				break;
4076 			}
4077 		}
4078 
4079 	}
4080 	/* sa_ignore NO_NULL_CHK */
4081 	if (stcb->sctp_socket) {
4082 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4083 		struct socket *so;
4084 
4085 #endif
4086 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4087 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4088 			/* sa_ignore NO_NULL_CHK */
4089 			sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
4090 		}
4091 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4092 		so = SCTP_INP_SO(stcb->sctp_ep);
4093 		atomic_add_int(&stcb->asoc.refcnt, 1);
4094 		SCTP_TCB_UNLOCK(stcb);
4095 		SCTP_SOCKET_LOCK(so, 1);
4096 		SCTP_TCB_LOCK(stcb);
4097 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4098 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4099 			/* assoc was freed while we were unlocked */
4100 			SCTP_SOCKET_UNLOCK(so, 1);
4101 			return;
4102 		}
4103 #endif
4104 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4105 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4106 		SCTP_SOCKET_UNLOCK(so, 1);
4107 #endif
4108 	} else {
4109 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4110 			sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
4111 		}
4112 	}
4113 
4114 	/* JRS - Use the congestion control given in the CC module */
4115 	if (asoc->last_acked_seq != cumack)
4116 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4117 
4118 	asoc->last_acked_seq = cumack;
4119 
4120 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4121 		/* nothing left in-flight */
4122 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4123 			net->flight_size = 0;
4124 			net->partial_bytes_acked = 0;
4125 		}
4126 		asoc->total_flight = 0;
4127 		asoc->total_flight_count = 0;
4128 	}
4129 	/* ECN Nonce updates */
4130 	if (asoc->ecn_nonce_allowed) {
4131 		if (asoc->nonce_sum_check) {
4132 			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
4133 				if (asoc->nonce_wait_for_ecne == 0) {
4134 					struct sctp_tmit_chunk *lchk;
4135 
4136 					lchk = TAILQ_FIRST(&asoc->send_queue);
4137 					asoc->nonce_wait_for_ecne = 1;
4138 					if (lchk) {
4139 						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4140 					} else {
4141 						asoc->nonce_wait_tsn = asoc->sending_seq;
4142 					}
4143 				} else {
4144 					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4145 					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4146 						/*
4147 						 * Misbehaving peer. We need
4148 						 * to react to this peer
4149 						 */
4150 						asoc->ecn_allowed = 0;
4151 						asoc->ecn_nonce_allowed = 0;
4152 					}
4153 				}
4154 			}
4155 		} else {
4156 			/* See if Resynchronization Possible */
4157 			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4158 				asoc->nonce_sum_check = 1;
4159 				/*
4160 				 * Now we must calculate what the base is.
4161 				 * We know the totals for all segments
4162 				 * gap-acked in the SACK (none), and we
4163 				 * know the SACK's nonce sum, carried in
4164 				 * nonce_sum_flag. So we can back-calculate
4165 				 * the new asoc->nonce_sum_expect_base:
4166 				 *
4167 				 * SACK-flag-Value   Seg-Sums   Base
4168 				 *        0              0        0
4169 				 *        1              0        1
4170 				 *        0              1        1
4171 				 *        1              1        0
4172 				 */
4173 				asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
4174 			}
4175 		}
4176 	}
4177 	/* RWND update */
4178 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4179 	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4180 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4181 		/* SWS sender side engages */
4182 		asoc->peers_rwnd = 0;
4183 	}
4184 	if (asoc->peers_rwnd > old_rwnd) {
4185 		win_probe_recovery = 1;
4186 	}
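	/*
	 * Example of the rwnd bookkeeping above (assuming the usual
	 * sctp_peer_chunk_oh sysctl default of 256): with an advertised
	 * rwnd of 64000, 2000 bytes in flight and 4 chunks still queued,
	 * peers_rwnd becomes 64000 - (2000 + 4 * 256) = 60976.
	 * sctp_sbspace_sub() floors the result at 0, and anything below
	 * sctp_sws_sender is clamped to 0 to avoid silly window syndrome.
	 */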
4187 	/* Now assure a timer where data is queued at */
4188 again:
4189 	j = 0;
4190 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4191 		int to_ticks;
4192 
4193 		if (win_probe_recovery && (net->window_probe)) {
4194 			win_probe_recovered = 1;
4195 			/*
4196 			 * Find the first chunk that was used with the window
4197 			 * probe and move it back for retransmission
4198 			 */
4199 			/* sa_ignore FREED_MEMORY */
4200 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4201 				if (tp1->window_probe) {
4202 					/* move back to data send queue */
4203 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
4204 					break;
4205 				}
4206 			}
4207 		}
4208 		if (net->RTO == 0) {
4209 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4210 		} else {
4211 			to_ticks = MSEC_TO_TICKS(net->RTO);
4212 		}
4213 		if (net->flight_size) {
4214 			j++;
4215 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4216 			    sctp_timeout_handler, &net->rxt_timer);
4217 			if (net->window_probe) {
4218 				net->window_probe = 0;
4219 			}
4220 		} else {
4221 			if (net->window_probe) {
4222 				/*
4223 				 * In window probes we must assure a timer
4224 				 * is still running there
4225 				 */
4226 				net->window_probe = 0;
4227 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4228 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4229 					    sctp_timeout_handler, &net->rxt_timer);
4230 				}
4231 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4232 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4233 				    stcb, net,
4234 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4235 			}
4236 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4237 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4238 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4239 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4240 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4241 				}
4242 			}
4243 		}
4244 	}
4245 	if ((j == 0) &&
4246 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4247 	    (asoc->sent_queue_retran_cnt == 0) &&
4248 	    (win_probe_recovered == 0) &&
4249 	    (done_once == 0)) {
4250 		/*
4251 		 * huh, this should not happen unless all packets are
4252 		 * PR-SCTP and marked to skip of course.
4253 		 */
4254 		if (sctp_fs_audit(asoc)) {
4255 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4256 				net->flight_size = 0;
4257 			}
4258 			asoc->total_flight = 0;
4259 			asoc->total_flight_count = 0;
4260 			asoc->sent_queue_retran_cnt = 0;
4261 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4262 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4263 					sctp_flight_size_increase(tp1);
4264 					sctp_total_flight_increase(stcb, tp1);
4265 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4266 					asoc->sent_queue_retran_cnt++;
4267 				}
4268 			}
4269 		}
4270 		done_once = 1;
4271 		goto again;
4272 	}
4273 	/**********************************/
4274 	/* Now what about shutdown issues */
4275 	/**********************************/
4276 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4277 		/* nothing left on sendqueue.. consider done */
4278 		/* clean up */
4279 		if ((asoc->stream_queue_cnt == 1) &&
4280 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4281 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4282 		    (asoc->locked_on_sending)
4283 		    ) {
4284 			struct sctp_stream_queue_pending *sp;
4285 
4286 			/*
4287 			 * I may be in a state where we got it all across... but
4288 			 * cannot write more due to a shutdown... we abort
4289 			 * since the user did not indicate EOR in this case.
4290 			 * The sp will be cleaned during free of the asoc.
4291 			 */
4292 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4293 			    sctp_streamhead);
4294 			if ((sp) && (sp->length == 0)) {
4295 				/* Let cleanup code purge it */
4296 				if (sp->msg_is_complete) {
4297 					asoc->stream_queue_cnt--;
4298 				} else {
4299 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4300 					asoc->locked_on_sending = NULL;
4301 					asoc->stream_queue_cnt--;
4302 				}
4303 			}
4304 		}
4305 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4306 		    (asoc->stream_queue_cnt == 0)) {
4307 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4308 				/* Need to abort here */
4309 				struct mbuf *oper;
4310 
4311 		abort_out_now:
4312 				*abort_now = 1;
4313 				/* XXX */
4314 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4315 				    0, M_DONTWAIT, 1, MT_DATA);
4316 				if (oper) {
4317 					struct sctp_paramhdr *ph;
4318 					uint32_t *ippp;
4319 
4320 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4321 					    sizeof(uint32_t);
4322 					ph = mtod(oper, struct sctp_paramhdr *);
4323 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4324 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4325 					ippp = (uint32_t *) (ph + 1);
4326 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4327 				}
4328 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4329 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4330 			} else {
4331 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4332 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4333 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4334 				}
4335 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4336 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4337 				sctp_stop_timers_for_shutdown(stcb);
4338 				sctp_send_shutdown(stcb,
4339 				    stcb->asoc.primary_destination);
4340 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4341 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4342 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4343 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4344 			}
4345 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4346 		    (asoc->stream_queue_cnt == 0)) {
4347 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4348 				goto abort_out_now;
4349 			}
4350 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4351 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4352 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4353 			sctp_send_shutdown_ack(stcb,
4354 			    stcb->asoc.primary_destination);
4355 
4356 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4357 			    stcb->sctp_ep, stcb, asoc->primary_destination);
4358 		}
4359 	}
4360 	/*********************************************/
4361 	/* Here we perform PR-SCTP procedures        */
4362 	/* (section 4.2)                             */
4363 	/*********************************************/
4364 	/* C1. update advancedPeerAckPoint */
4365 	if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4366 		asoc->advanced_peer_ack_point = cumack;
4367 	}
4368 	/* PR-Sctp issues need to be addressed too */
4369 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4370 		struct sctp_tmit_chunk *lchk;
4371 		uint32_t old_adv_peer_ack_point;
4372 
4373 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4374 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4375 		/* C3. See if we need to send a Fwd-TSN */
4376 		if (compare_with_wrap(asoc->advanced_peer_ack_point, cumack,
4377 		    MAX_TSN)) {
4378 			/*
4379 			 * ISSUE with ECN, see FWD-TSN processing for notes
4380 			 * on issues that will occur when the ECN NONCE
4381 			 * stuff is put into SCTP for cross checking.
4382 			 */
4383 			if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
4384 			    MAX_TSN)) {
4385 				send_forward_tsn(stcb, asoc);
4386 				/*
4387 				 * ECN Nonce: Disable Nonce Sum check when
4388 				 * FWD TSN is sent and store resync tsn
4389 				 */
4390 				asoc->nonce_sum_check = 0;
4391 				asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
4392 			} else if (lchk) {
4393 				/* try to FR fwd-tsn's that get lost too */
4394 				lchk->rec.data.fwd_tsn_cnt++;
4395 				if (lchk->rec.data.fwd_tsn_cnt > 3) {
4396 					send_forward_tsn(stcb, asoc);
4397 					lchk->rec.data.fwd_tsn_cnt = 0;
4398 				}
4399 			}
4400 		}
4401 		if (lchk) {
4402 			/* Assure a timer is up */
4403 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4404 			    stcb->sctp_ep, stcb, lchk->whoTo);
4405 		}
4406 	}
4407 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4408 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4409 		    rwnd,
4410 		    stcb->asoc.peers_rwnd,
4411 		    stcb->asoc.total_flight,
4412 		    stcb->asoc.total_output_queue_size);
4413 	}
4414 }
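/*
 * Note on the two SACK paths (annotation): sctp_express_handle_sack()
 * above is the fast path for a SACK that moves only the cumulative ack
 * and carries no gap-ack or duplicate reports, while sctp_handle_sack()
 * below walks gap blocks, strikes chunks and handles revocation.  Both
 * converge on the same rwnd, timer and PR-SCTP bookkeeping.
 */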
4415 
4416 void
4417 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4418     struct sctp_tcb *stcb, struct sctp_nets *net_from,
4419     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4420     int *abort_now, uint8_t flags,
4421     uint32_t cum_ack, uint32_t rwnd)
4422 {
4423 	struct sctp_association *asoc;
4424 	struct sctp_tmit_chunk *tp1, *tp2;
4425 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4426 	uint32_t sav_cum_ack;
4427 	uint16_t wake_him = 0;
4428 	uint32_t send_s = 0;
4429 	long j;
4430 	int accum_moved = 0;
4431 	int will_exit_fast_recovery = 0;
4432 	uint32_t a_rwnd, old_rwnd;
4433 	int win_probe_recovery = 0;
4434 	int win_probe_recovered = 0;
4435 	struct sctp_nets *net = NULL;
4436 	int nonce_sum_flag, ecn_seg_sums = 0;
4437 	int done_once;
4438 	uint8_t reneged_all = 0;
4439 	uint8_t cmt_dac_flag;
4440 
4441 	/*
4442 	 * we take any chance we can to service our queues since we cannot
4443 	 * get awoken when the socket is read from :<
4444 	 */
4445 	/*
4446 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4447 	 * old sack, if so discard. 2) If there is nothing left in the send
4448 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4449 	 * too, update any rwnd change and verify no timers are running.
4450 	 * then return. 3) Process any new consecutive data i.e. cum-ack
4451 	 * moved process these first and note that it moved. 4) Process any
4452 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4453 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4454 	 * sync up flightsizes and things, stop all timers and also check
4455 	 * for shutdown_pending state. If so then go ahead and send off the
4456 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4457 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4458 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4459 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4460 	 * if in shutdown_recv state.
4461 	 */
4462 	SCTP_TCB_LOCK_ASSERT(stcb);
4463 	/* CMT DAC algo */
4464 	this_sack_lowest_newack = 0;
4465 	j = 0;
4466 	SCTP_STAT_INCR(sctps_slowpath_sack);
4467 	last_tsn = cum_ack;
4468 	nonce_sum_flag = flags & SCTP_SACK_NONCE_SUM;
4469 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4470 #ifdef SCTP_ASOCLOG_OF_TSNS
4471 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4472 	stcb->asoc.cumack_log_at++;
4473 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4474 		stcb->asoc.cumack_log_at = 0;
4475 	}
4476 #endif
4477 	a_rwnd = rwnd;
4478 
4479 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4480 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4481 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4482 	}
4483 	old_rwnd = stcb->asoc.peers_rwnd;
4484 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4485 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4486 		    stcb->asoc.overall_error_count,
4487 		    0,
4488 		    SCTP_FROM_SCTP_INDATA,
4489 		    __LINE__);
4490 	}
4491 	stcb->asoc.overall_error_count = 0;
4492 	asoc = &stcb->asoc;
4493 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4494 		sctp_log_sack(asoc->last_acked_seq,
4495 		    cum_ack,
4496 		    0,
4497 		    num_seg,
4498 		    num_dup,
4499 		    SCTP_LOG_NEW_SACK);
4500 	}
4501 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
4502 		uint16_t i;
4503 		uint32_t *dupdata, dblock;
4504 
4505 		for (i = 0; i < num_dup; i++) {
4506 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4507 			    sizeof(uint32_t), (uint8_t *) & dblock);
4508 			if (dupdata == NULL) {
4509 				break;
4510 			}
4511 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4512 		}
4513 	}
4514 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4515 		/* reality check */
4516 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4517 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4518 			    sctpchunk_listhead);
4519 			send_s = tp1->rec.data.TSN_seq + 1;
4520 		} else {
4521 			tp1 = NULL;
4522 			send_s = asoc->sending_seq;
4523 		}
4524 		if (cum_ack == send_s ||
4525 		    compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
4526 			struct mbuf *oper;
4527 
4528 			/*
4529 			 * no way, we have not even sent this TSN out yet.
4530 			 * Peer is hopelessly messed up with us.
4531 			 */
4532 			printf("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4533 			    cum_ack, send_s);
4534 			if (tp1) {
4535 				printf("Got send_s from tsn:%x + 1 of tp1:%p\n",
4536 				    tp1->rec.data.TSN_seq, tp1);
4537 			}
4538 	hopeless_peer:
4539 			*abort_now = 1;
4540 			/* XXX */
4541 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4542 			    0, M_DONTWAIT, 1, MT_DATA);
4543 			if (oper) {
4544 				struct sctp_paramhdr *ph;
4545 				uint32_t *ippp;
4546 
4547 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4548 				    sizeof(uint32_t);
4549 				ph = mtod(oper, struct sctp_paramhdr *);
4550 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4551 				ph->param_length = htons(SCTP_BUF_LEN(oper));
4552 				ippp = (uint32_t *) (ph + 1);
4553 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4554 			}
4555 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4556 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4557 			return;
4558 		}
4559 	}
4560 	/**********************/
4561 	/* 1) check the range */
4562 	/**********************/
4563 	if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
4564 		/* acking something behind */
4565 		return;
4566 	}
4567 	sav_cum_ack = asoc->last_acked_seq;
4568 
4569 	/* update the Rwnd of the peer */
4570 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4571 	    TAILQ_EMPTY(&asoc->send_queue) &&
4572 	    (asoc->stream_queue_cnt == 0)) {
4573 		/* nothing left on send/sent and strmq */
4574 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4575 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4576 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4577 		}
4578 		asoc->peers_rwnd = a_rwnd;
4579 		if (asoc->sent_queue_retran_cnt) {
4580 			asoc->sent_queue_retran_cnt = 0;
4581 		}
4582 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4583 			/* SWS sender side engages */
4584 			asoc->peers_rwnd = 0;
4585 		}
4586 		/* stop any timers */
4587 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4588 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4589 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4590 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4591 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4592 					SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
4593 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4594 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4595 				}
4596 			}
4597 			net->partial_bytes_acked = 0;
4598 			net->flight_size = 0;
4599 		}
4600 		asoc->total_flight = 0;
4601 		asoc->total_flight_count = 0;
4602 		return;
4603 	}
4604 	/*
4605 	 * We init net_ack and net_ack2 to 0. These are used to track 2
4606 	 * things. The total byte count acked is tracked in net_ack AND
4607 	 * net_ack2 is used to track the total bytes acked that are un-
4608 	 * ambiguous and were never retransmitted. We track these on a per
4609 	 * destination address basis.
4610 	 */
4611 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4612 		net->prev_cwnd = net->cwnd;
4613 		net->net_ack = 0;
4614 		net->net_ack2 = 0;
4615 
4616 		/*
4617 		 * CMT: Reset CUC and Fast recovery algo variables before
4618 		 * SACK processing
4619 		 */
4620 		net->new_pseudo_cumack = 0;
4621 		net->will_exit_fast_recovery = 0;
4622 	}
4623 	/* process the new consecutive TSN first */
4624 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
4625 	while (tp1) {
4626 		if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
4627 		    MAX_TSN) ||
4628 		    last_tsn == tp1->rec.data.TSN_seq) {
4629 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4630 				/*
4631 				 * ECN Nonce: Add the nonce to the sender's
4632 				 * nonce sum
4633 				 */
4634 				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4635 				accum_moved = 1;
4636 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4637 					/*
4638 					 * If it is less than ACKED, it is
4639 					 * now no longer in flight. Higher
4640 					 * values may occur during marking
4641 					 */
4642 					if ((tp1->whoTo->dest_state &
4643 					    SCTP_ADDR_UNCONFIRMED) &&
4644 					    (tp1->snd_count < 2)) {
4645 						/*
4646 						 * If there was no retran
4647 						 * and the address is
4648 						 * un-confirmed and we sent
4649 						 * there and are now
4650 						 * sacked.. its confirmed,
4651 						 * sacked... it's confirmed,
4652 						 */
4653 						tp1->whoTo->dest_state &=
4654 						    ~SCTP_ADDR_UNCONFIRMED;
4655 					}
4656 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4657 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4658 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4659 							    tp1->whoTo->flight_size,
4660 							    tp1->book_size,
4661 							    (uintptr_t) tp1->whoTo,
4662 							    tp1->rec.data.TSN_seq);
4663 						}
4664 						sctp_flight_size_decrease(tp1);
4665 						sctp_total_flight_decrease(stcb, tp1);
4666 					}
4667 					tp1->whoTo->net_ack += tp1->send_size;
4668 
4669 					/* CMT SFR and DAC algos */
4670 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4671 					tp1->whoTo->saw_newack = 1;
4672 
4673 					if (tp1->snd_count < 2) {
4674 						/*
4675 						 * True non-retransmitted
4676 						 * chunk
4677 						 */
4678 						tp1->whoTo->net_ack2 +=
4679 						    tp1->send_size;
4680 
4681 						/* update RTO too? */
4682 						if (tp1->do_rtt) {
4683 							tp1->whoTo->RTO =
4684 							    sctp_calculate_rto(stcb,
4685 							    asoc, tp1->whoTo,
4686 							    &tp1->sent_rcv_time,
4687 							    sctp_align_safe_nocopy);
4688 							tp1->do_rtt = 0;
4689 						}
4690 					}
4691 					/*
4692 					 * CMT: CUCv2 algorithm. From the
4693 					 * cumack'd TSNs, for each TSN being
4694 					 * acked for the first time, set the
4695 					 * following variables for the
4696 					 * corresp destination.
4697 					 * new_pseudo_cumack will trigger a
4698 					 * cwnd update.
4699 					 * find_(rtx_)pseudo_cumack will
4700 					 * trigger search for the next
4701 					 * expected (rtx-)pseudo-cumack.
4702 					 */
4703 					tp1->whoTo->new_pseudo_cumack = 1;
4704 					tp1->whoTo->find_pseudo_cumack = 1;
4705 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4706 
4707 
4708 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4709 						sctp_log_sack(asoc->last_acked_seq,
4710 						    cum_ack,
4711 						    tp1->rec.data.TSN_seq,
4712 						    0,
4713 						    0,
4714 						    SCTP_LOG_TSN_ACKED);
4715 					}
4716 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4717 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4718 					}
4719 				}
4720 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4721 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4722 #ifdef SCTP_AUDITING_ENABLED
4723 					sctp_audit_log(0xB3,
4724 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4725 #endif
4726 				}
4727 				if (tp1->rec.data.chunk_was_revoked) {
4728 					/* deflate the cwnd */
4729 					tp1->whoTo->cwnd -= tp1->book_size;
4730 					tp1->rec.data.chunk_was_revoked = 0;
4731 				}
4732 				tp1->sent = SCTP_DATAGRAM_ACKED;
4733 			}
4734 		} else {
4735 			break;
4736 		}
4737 		tp1 = TAILQ_NEXT(tp1, sctp_next);
4738 	}
4739 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4740 	/* always set this up to cum-ack */
4741 	asoc->this_sack_highest_gap = last_tsn;
4742 
4743 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4744 
4745 		/*
4746 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4747 		 * to be greater than the cumack. Also reset saw_newack to 0
4748 		 * for all dests.
4749 		 */
4750 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4751 			net->saw_newack = 0;
4752 			net->this_sack_highest_newack = last_tsn;
4753 		}
4754 
4755 		/*
4756 		 * thisSackHighestGap will increase while handling NEW
4757 		 * segments this_sack_highest_newack will increase while
4758 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4759 		 * used for CMT DAC algo. saw_newack will also change.
4760 		 */
4761 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4762 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4763 		    num_seg, num_nr_seg, &ecn_seg_sums)) {
4764 			wake_him++;
4765 		}
4766 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4767 			/*
4768 			 * validate the biggest_tsn_acked in the gap acks if
4769 			 * strict adherence is wanted.
4770 			 */
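			/*
			 * Illustration (send_s is assumed to be the next
			 * TSN we will send, computed earlier): with
			 * send_s = 1000, a gap report covering TSN 1000 or
			 * higher acks data we never sent and trips the
			 * hopeless_peer abort below.
			 */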
4771 			if ((biggest_tsn_acked == send_s) ||
4772 			    (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
4773 				/*
4774 				 * peer is either confused or we are under
4775 				 * attack. We must abort.
4776 				 */
4777 				printf("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4778 				    biggest_tsn_acked,
4779 				    send_s);
4780 
4781 				goto hopeless_peer;
4782 			}
4783 		}
4784 	}
4785 	/*******************************************/
4786 	/* cancel ALL T3-send timers if accum moved */
4787 	/*******************************************/
4788 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
4789 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4790 			if (net->new_pseudo_cumack)
4791 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4792 				    stcb, net,
4793 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4794 
4795 		}
4796 	} else {
4797 		if (accum_moved) {
4798 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4799 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4800 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4801 			}
4802 		}
4803 	}
4804 	/********************************************/
4805 	/* drop the acked chunks from the sendqueue */
4806 	/********************************************/
4807 	asoc->last_acked_seq = cum_ack;
4808 
4809 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
4810 	if (tp1 == NULL)
4811 		goto done_with_it;
4812 	do {
4813 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
4814 		    MAX_TSN)) {
4815 			break;
4816 		}
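		/*
		 * Note: compare_with_wrap() does serial-number comparison,
		 * so the 32-bit TSN space may wrap; e.g. TSN 5 counts as
		 * "greater than" TSN 0xfffffffa even though it is
		 * numerically smaller.
		 */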
4817 		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4818 			/* no more sent on list */
4819 			printf("Warning, tp1->sent == %d and it's now acked?\n",
4820 			    tp1->sent);
4821 		}
4822 		tp2 = TAILQ_NEXT(tp1, sctp_next);
4823 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4824 		if (tp1->pr_sctp_on) {
4825 			if (asoc->pr_sctp_cnt != 0)
4826 				asoc->pr_sctp_cnt--;
4827 		}
4828 		if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
4829 		    (asoc->total_flight > 0)) {
4830 #ifdef INVARIANTS
4831 			panic("Warning flight size is positive and should be 0");
4832 #else
4833 			SCTP_PRINTF("Warning flight size should be 0 but is %d\n",
4834 			    asoc->total_flight);
4835 #endif
4836 			asoc->total_flight = 0;
4837 		}
4838 		if (tp1->data) {
4839 			/* sa_ignore NO_NULL_CHK */
4840 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4841 			sctp_m_freem(tp1->data);
4842 			if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4843 				asoc->sent_queue_cnt_removeable--;
4844 			}
4845 		}
4846 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4847 			sctp_log_sack(asoc->last_acked_seq,
4848 			    cum_ack,
4849 			    tp1->rec.data.TSN_seq,
4850 			    0,
4851 			    0,
4852 			    SCTP_LOG_FREE_SENT);
4853 		}
4854 		tp1->data = NULL;
4855 		asoc->sent_queue_cnt--;
4856 		sctp_free_a_chunk(stcb, tp1);
4857 		wake_him++;
4858 		tp1 = tp2;
4859 	} while (tp1 != NULL);
4860 
4861 done_with_it:
4862 	/* sa_ignore NO_NULL_CHK */
4863 	if ((wake_him) && (stcb->sctp_socket)) {
4864 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4865 		struct socket *so;
4866 
4867 #endif
4868 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4869 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4870 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
4871 		}
4872 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4873 		so = SCTP_INP_SO(stcb->sctp_ep);
4874 		atomic_add_int(&stcb->asoc.refcnt, 1);
4875 		SCTP_TCB_UNLOCK(stcb);
4876 		SCTP_SOCKET_LOCK(so, 1);
4877 		SCTP_TCB_LOCK(stcb);
4878 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4879 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4880 			/* assoc was freed while we were unlocked */
4881 			SCTP_SOCKET_UNLOCK(so, 1);
4882 			return;
4883 		}
4884 #endif
4885 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4886 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4887 		SCTP_SOCKET_UNLOCK(so, 1);
4888 #endif
4889 	} else {
4890 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4891 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
4892 		}
4893 	}
4894 
4895 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4896 		if (compare_with_wrap(asoc->last_acked_seq,
4897 		    asoc->fast_recovery_tsn, MAX_TSN) ||
4898 		    asoc->last_acked_seq == asoc->fast_recovery_tsn) {
4899 			/* Setup so we will exit RFC2582 fast recovery */
4900 			will_exit_fast_recovery = 1;
4901 		}
4902 	}
4903 	/*
4904 	 * Check for revoked fragments:
4905 	 *
4906 	 * If the previous SACK had no frags, then we can't have any revoked.
4907 	 * If the previous SACK had frags and we now have frags (num_seg >
4908 	 * 0), call sctp_check_for_revoked() to tell if the peer revoked
4909 	 * some of them. Otherwise the peer revoked all ACKED fragments,
4910 	 * since we had some before and now we have NONE.
4911 	 */
4912 
4913 	if (num_seg)
4914 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4915 	else if (asoc->saw_sack_with_frags) {
4916 		int cnt_revoked = 0;
4917 
4918 		tp1 = TAILQ_FIRST(&asoc->sent_queue);
4919 		if (tp1 != NULL) {
4920 			/* Peer revoked all dg's marked or acked */
4921 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4922 				/*
4923 				 * EY- maybe check only if it is nr_acked
4924 				 * nr_marked may not be possible
4925 				 */
4926 				if ((tp1->sent == SCTP_DATAGRAM_NR_ACKED) ||
4927 				    (tp1->sent == SCTP_DATAGRAM_NR_MARKED)) {
4928 					continue;
4929 				}
4930 				if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4931 					tp1->sent = SCTP_DATAGRAM_SENT;
4932 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4933 						sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4934 						    tp1->whoTo->flight_size,
4935 						    tp1->book_size,
4936 						    (uintptr_t) tp1->whoTo,
4937 						    tp1->rec.data.TSN_seq);
4938 					}
4939 					sctp_flight_size_increase(tp1);
4940 					sctp_total_flight_increase(stcb, tp1);
4941 					tp1->rec.data.chunk_was_revoked = 1;
4942 					/*
4943 					 * To ensure that this increase in
4944 					 * flightsize, which is artificial,
4945 					 * does not throttle the sender, we
4946 					 * also increase the cwnd
4947 					 * artificially.
4948 					 */
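					/*
					 * Worked example (hypothetical
					 * numbers): cwnd 8000, flight 8000
					 * and a revoked chunk of book_size
					 * 1200 give flight 9200 and cwnd
					 * 9200, so the sender's headroom
					 * stays the same.
					 */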
4949 					tp1->whoTo->cwnd += tp1->book_size;
4950 					cnt_revoked++;
4951 				}
4952 			}
4953 			if (cnt_revoked) {
4954 				reneged_all = 1;
4955 			}
4956 		}
4957 		asoc->saw_sack_with_frags = 0;
4958 	}
4959 	if (num_seg || num_nr_seg)
4960 		asoc->saw_sack_with_frags = 1;
4961 	else
4962 		asoc->saw_sack_with_frags = 0;
4963 
4964 	/* JRS - Use the congestion control given in the CC module */
4965 	asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4966 
4967 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4968 		/* nothing left in-flight */
4969 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4970 			/* stop all timers */
4971 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4972 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4973 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4974 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4975 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4976 				}
4977 			}
4978 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4979 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4980 			net->flight_size = 0;
4981 			net->partial_bytes_acked = 0;
4982 		}
4983 		asoc->total_flight = 0;
4984 		asoc->total_flight_count = 0;
4985 	}
4986 	/**********************************/
4987 	/* Now what about shutdown issues */
4988 	/**********************************/
4989 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4990 		/* nothing left on sendqueue.. consider done */
4991 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4992 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4993 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4994 		}
4995 		asoc->peers_rwnd = a_rwnd;
4996 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4997 			/* SWS sender side engages */
4998 			asoc->peers_rwnd = 0;
4999 		}
5000 		/* clean up */
5001 		if ((asoc->stream_queue_cnt == 1) &&
5002 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5003 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5004 		    (asoc->locked_on_sending)
5005 		    ) {
5006 			struct sctp_stream_queue_pending *sp;
5007 
5008 			/*
5009 			 * We may be in a state where everything got across
5010 			 * but we cannot write more due to a shutdown; we
5011 			 * abort since the user did not indicate EOR here.
5012 			 */
5013 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
5014 			    sctp_streamhead);
5015 			if ((sp) && (sp->length == 0)) {
5016 				asoc->locked_on_sending = NULL;
5017 				if (sp->msg_is_complete) {
5018 					asoc->stream_queue_cnt--;
5019 				} else {
5020 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
5021 					asoc->stream_queue_cnt--;
5022 				}
5023 			}
5024 		}
5025 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5026 		    (asoc->stream_queue_cnt == 0)) {
5027 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5028 				/* Need to abort here */
5029 				struct mbuf *oper;
5030 
5031 		abort_out_now:
5032 				*abort_now = 1;
5033 				/* XXX */
5034 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
5035 				    0, M_DONTWAIT, 1, MT_DATA);
5036 				if (oper) {
5037 					struct sctp_paramhdr *ph;
5038 					uint32_t *ippp;
5039 
5040 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5041 					    sizeof(uint32_t);
5042 					ph = mtod(oper, struct sctp_paramhdr *);
5043 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
5044 					ph->param_length = htons(SCTP_BUF_LEN(oper));
5045 					ippp = (uint32_t *) (ph + 1);
5046 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
5047 				}
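				/*
				 * The error cause built above is a param
				 * header followed by one 32-bit location
				 * code: 8 bytes on the wire (type
				 * SCTP_CAUSE_USER_INITIATED_ABT, length 8,
				 * then the SCTP_LOC_31 tag).
				 */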
5048 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
5049 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
5050 				return;
5051 			} else {
5052 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5053 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5054 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5055 				}
5056 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5057 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5058 				sctp_stop_timers_for_shutdown(stcb);
5059 				sctp_send_shutdown(stcb,
5060 				    stcb->asoc.primary_destination);
5061 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5062 				    stcb->sctp_ep, stcb, asoc->primary_destination);
5063 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5064 				    stcb->sctp_ep, stcb, asoc->primary_destination);
5065 			}
5066 			return;
5067 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5068 		    (asoc->stream_queue_cnt == 0)) {
5069 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5070 				goto abort_out_now;
5071 			}
5072 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5073 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5074 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5075 			sctp_send_shutdown_ack(stcb,
5076 			    stcb->asoc.primary_destination);
5077 
5078 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5079 			    stcb->sctp_ep, stcb, asoc->primary_destination);
5080 			return;
5081 		}
5082 	}
5083 	/*
5084 	 * Now here we are going to recycle net_ack for a different use...
5085 	 * HEADS UP.
5086 	 */
5087 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5088 		net->net_ack = 0;
5089 	}
5090 
5091 	/*
5092 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5093 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5094 	 * automatically ensure that.
5095 	 */
5096 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
5097 		this_sack_lowest_newack = cum_ack;
5098 	}
5099 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5100 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5101 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5102 	}
5103 	/* JRS - Use the congestion control given in the CC module */
5104 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5105 
5106 	/******************************************************************
5107 	 *  Here we handle ECN nonce checking.
5108 	 *  We check whether the nonce sum flag was incorrect or whether
5109 	 *  resynchronization needs to be done. If we catch a misbehaving
5110 	 *  receiver, we disable ECN for it.
5111 	 ******************************************************************/
5112 
5113 	if (asoc->ecn_nonce_allowed) {
5114 		if (asoc->nonce_sum_check) {
5115 			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
5116 				if (asoc->nonce_wait_for_ecne == 0) {
5117 					struct sctp_tmit_chunk *lchk;
5118 
5119 					lchk = TAILQ_FIRST(&asoc->send_queue);
5120 					asoc->nonce_wait_for_ecne = 1;
5121 					if (lchk) {
5122 						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
5123 					} else {
5124 						asoc->nonce_wait_tsn = asoc->sending_seq;
5125 					}
5126 				} else {
5127 					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
5128 					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
5129 						/*
5130 						 * Misbehaving peer: react
5131 						 * by disabling ECN for it.
5132 						 */
5133 						asoc->ecn_allowed = 0;
5134 						asoc->ecn_nonce_allowed = 0;
5135 					}
5136 				}
5137 			}
5138 		} else {
5139 			/* See if Resynchronization Possible */
5140 			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
5141 				asoc->nonce_sum_check = 1;
5142 				/*
5143 				 * now we must calculate what the base is.
5144 				 * We do this based on two things, we know
5145 				 * the total's for all the segments
5146 				 * gap-acked in the SACK, its stored in
5147 				 * ecn_seg_sums. We also know the SACK's
5148 				 * nonce sum, its in nonce_sum_flag. So we
5149 				 * can build a truth table to back-calculate
5150 				 * the new value of
5151 				 * asoc->nonce_sum_expect_base:
5152 				 *
5153 				 * Base = SACK-flag XOR Seg-Sums:
5154 				 *   flag 0, sums 0 -> 0;  flag 1, sums 0 -> 1;
5155 				 *   flag 0, sums 1 -> 1;  flag 1, sums 1 -> 0.
5156 				 */
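				/*
				 * Worked example (hypothetical values):
				 * with ecn_seg_sums = 1 and nonce_sum_flag
				 * = 0, the base set below is (1 ^ 0) &
				 * SCTP_SACK_NONCE_SUM = 1, matching the
				 * table above.
				 */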
5157 				asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
5158 			}
5159 		}
5160 	}
5161 	/* Now are we exiting loss recovery ? */
5162 	if (will_exit_fast_recovery) {
5163 		/* Ok, we must exit fast recovery */
5164 		asoc->fast_retran_loss_recovery = 0;
5165 	}
5166 	if ((asoc->sat_t3_loss_recovery) &&
5167 	    ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
5168 	    MAX_TSN) ||
5169 	    (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
5170 		/* end satellite t3 loss recovery */
5171 		asoc->sat_t3_loss_recovery = 0;
5172 	}
5173 	/*
5174 	 * CMT Fast recovery
5175 	 */
5176 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5177 		if (net->will_exit_fast_recovery) {
5178 			/* Ok, we must exit fast recovery */
5179 			net->fast_retran_loss_recovery = 0;
5180 		}
5181 	}
5182 
5183 	/* Adjust and set the new rwnd value */
5184 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5185 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5186 		    asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5187 	}
5188 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5189 	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
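	/*
	 * Worked example (hypothetical numbers, assuming sctp_sbspace_sub()
	 * is a floor-at-zero subtraction): a_rwnd = 65536, total_flight =
	 * 3000 and 10 queued chunks with sctp_peer_chunk_oh = 256 give
	 * peers_rwnd = 65536 - (3000 + 2560) = 59976.
	 */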
5190 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5191 		/* SWS sender side engages */
5192 		asoc->peers_rwnd = 0;
5193 	}
5194 	if (asoc->peers_rwnd > old_rwnd) {
5195 		win_probe_recovery = 1;
5196 	}
5197 	/*
5198 	 * Now we must setup so we have a timer up for anyone with
5199 	 * outstanding data.
5200 	 */
5201 	done_once = 0;
5202 again:
5203 	j = 0;
5204 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5205 		if (win_probe_recovery && (net->window_probe)) {
5206 			win_probe_recovered = 1;
5207 			/*-
5208 			 * Find first chunk that was used with
5209 			 * window probe and clear the event. Put
5210 			 * it back into the send queue as if has
5211 			 * it back into the send queue as if it had
5212 			 */
5213 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5214 				if (tp1->window_probe) {
5215 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
5216 					break;
5217 				}
5218 			}
5219 		}
5220 		if (net->flight_size) {
5221 			j++;
5222 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5223 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5224 				    stcb->sctp_ep, stcb, net);
5225 			}
5226 			if (net->window_probe) {
5227 				net->window_probe = 0;
5228 			}
5229 		} else {
5230 			if (net->window_probe) {
5231 				/*
5232 				 * For window probes we must ensure that a
5233 				 * timer is still running there.
5234 				 */
5235 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5236 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5237 					    stcb->sctp_ep, stcb, net);
5238 
5239 				}
5240 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5241 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5242 				    stcb, net,
5243 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5244 			}
5245 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5246 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5247 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5248 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5249 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
5250 				}
5251 			}
5252 		}
5253 	}
5254 	if ((j == 0) &&
5255 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5256 	    (asoc->sent_queue_retran_cnt == 0) &&
5257 	    (win_probe_recovered == 0) &&
5258 	    (done_once == 0)) {
5259 		/*
5260 		 * This should not happen unless all packets are
5261 		 * PR-SCTP and marked to skip, of course.
5262 		 */
5263 		if (sctp_fs_audit(asoc)) {
5264 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5265 				net->flight_size = 0;
5266 			}
5267 			asoc->total_flight = 0;
5268 			asoc->total_flight_count = 0;
5269 			asoc->sent_queue_retran_cnt = 0;
5270 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5271 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5272 					sctp_flight_size_increase(tp1);
5273 					sctp_total_flight_increase(stcb, tp1);
5274 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5275 					asoc->sent_queue_retran_cnt++;
5276 				}
5277 			}
5278 		}
5279 		done_once = 1;
5280 		goto again;
5281 	}
5282 	/*********************************************/
5283 	/* Here we perform PR-SCTP procedures        */
5284 	/* (section 4.2)                             */
5285 	/*********************************************/
5286 	/* C1. update advancedPeerAckPoint */
5287 	if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
5288 		asoc->advanced_peer_ack_point = cum_ack;
5289 	}
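	/*
	 * Illustration (hypothetical TSNs): with cum_ack = 100 and expired
	 * PR-SCTP chunks at TSNs 101-103, the C2 step below can move
	 * advanced_peer_ack_point to 103; since that is beyond cum_ack, a
	 * FWD-TSN is due (C3).
	 */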
5290 	/* C2. try to further move advancedPeerAckPoint ahead */
5291 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5292 		struct sctp_tmit_chunk *lchk;
5293 		uint32_t old_adv_peer_ack_point;
5294 
5295 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5296 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5297 		/* C3. See if we need to send a Fwd-TSN */
5298 		if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
5299 		    MAX_TSN)) {
5300 			/*
5301 			 * ISSUE with ECN, see FWD-TSN processing for notes
5302 			 * on issues that will occur when the ECN NONCE
5303 			 * stuff is put into SCTP for cross checking.
5304 			 */
5305 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5306 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5307 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5308 				    old_adv_peer_ack_point);
5309 			}
5310 			if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
5311 			    MAX_TSN)) {
5312 				send_forward_tsn(stcb, asoc);
5313 				/*
5314 				 * ECN Nonce: Disable Nonce Sum check when
5315 				 * FWD TSN is sent and store resync tsn
5316 				 */
5317 				asoc->nonce_sum_check = 0;
5318 				asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
5319 			} else if (lchk) {
5320 				/* try to fast-retransmit fwd-tsn's that get lost too */
5321 				lchk->rec.data.fwd_tsn_cnt++;
5322 				if (lchk->rec.data.fwd_tsn_cnt > 3) {
5323 					send_forward_tsn(stcb, asoc);
5324 					lchk->rec.data.fwd_tsn_cnt = 0;
5325 				}
5326 			}
5327 		}
5328 		if (lchk) {
5329 			/* Assure a timer is up */
5330 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5331 			    stcb->sctp_ep, stcb, lchk->whoTo);
5332 		}
5333 	}
5334 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5335 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5336 		    a_rwnd,
5337 		    stcb->asoc.peers_rwnd,
5338 		    stcb->asoc.total_flight,
5339 		    stcb->asoc.total_output_queue_size);
5340 	}
5341 }
5342 
5343 void
5344 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5345     struct sctp_nets *netp, int *abort_flag)
5346 {
5347 	/* Copy cum-ack */
5348 	uint32_t cum_ack, a_rwnd;
5349 
5350 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5351 	/* Arrange so a_rwnd does NOT change */
5352 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
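	/*
	 * Sketch of why this works (assuming the SACK handler derives the
	 * peer's rwnd by subtracting outstanding flight from a_rwnd, as in
	 * the rwnd update earlier in this file): peers_rwnd + total_flight
	 * minus total_flight leaves peers_rwnd exactly where it was.
	 */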
5353 
5354 	/* Now call the express sack handling */
5355 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
5356 }
5357 
5358 static void
5359 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5360     struct sctp_stream_in *strmin)
5361 {
5362 	struct sctp_queued_to_read *ctl, *nctl;
5363 	struct sctp_association *asoc;
5364 	int tt;
5365 
5366 	asoc = &stcb->asoc;
5367 	tt = strmin->last_sequence_delivered;
5368 	/*
5369 	 * First deliver anything prior to and including the stream
5370 	 * sequence number that came in.
5371 	 */
5372 	ctl = TAILQ_FIRST(&strmin->inqueue);
5373 	while (ctl) {
5374 		nctl = TAILQ_NEXT(ctl, next);
5375 		if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
5376 		    (tt == ctl->sinfo_ssn)) {
5377 			/* this is deliverable now */
5378 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5379 			/* subtract pending on streams */
5380 			asoc->size_on_all_streams -= ctl->length;
5381 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5382 			/* deliver it to at least the delivery-q */
5383 			if (stcb->sctp_socket) {
5384 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5385 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5386 				    ctl,
5387 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5388 			}
5389 		} else {
5390 			/* no more delivery now. */
5391 			break;
5392 		}
5393 		ctl = nctl;
5394 	}
5395 	/*
5396 	 * Now we must deliver things in the queue the normal way, if any
5397 	 * are now ready.
5398 	 */
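	/*
	 * Illustration (hypothetical SSNs): with last_sequence_delivered =
	 * 4 and SSNs 5, 6 and 8 queued, the loop below delivers 5 and 6,
	 * then stops at the gap before 8.
	 */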
5399 	tt = strmin->last_sequence_delivered + 1;
5400 	ctl = TAILQ_FIRST(&strmin->inqueue);
5401 	while (ctl) {
5402 		nctl = TAILQ_NEXT(ctl, next);
5403 		if (tt == ctl->sinfo_ssn) {
5404 			/* this is deliverable now */
5405 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5406 			/* subtract pending on streams */
5407 			asoc->size_on_all_streams -= ctl->length;
5408 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5409 			/* deliver it to at least the delivery-q */
5410 			strmin->last_sequence_delivered = ctl->sinfo_ssn;
5411 			if (stcb->sctp_socket) {
5412 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5413 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5414 				    ctl,
5415 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5416 
5417 			}
5418 			tt = strmin->last_sequence_delivered + 1;
5419 		} else {
5420 			break;
5421 		}
5422 		ctl = nctl;
5423 	}
5424 }
5425 
5426 static void
5427 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5428     struct sctp_association *asoc,
5429     uint16_t stream, uint16_t seq)
5430 {
5431 	struct sctp_tmit_chunk *chk, *at;
5432 
5433 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5434 		/* For each one on here see if we need to toss it */
5435 		/*
5436 		 * For now large messages held on the reasmqueue that are
5437 		 * complete will be tossed too. We could in theory do more
5438 		 * work to spin through and stop after dumping one msg aka
5439 		 * seeing the start of a new msg at the head, and call the
5440 		 * delivery function... to see if it can be delivered... But
5441 		 * for now we just dump everything on the queue.
5442 		 */
5443 		chk = TAILQ_FIRST(&asoc->reasmqueue);
5444 		while (chk) {
5445 			at = TAILQ_NEXT(chk, sctp_next);
5446 			/*
5447 			 * Do not toss it if on a different stream or marked
5448 			 * for unordered delivery in which case the stream
5449 			 * sequence number has no meaning.
5450 			 */
5451 			if ((chk->rec.data.stream_number != stream) ||
5452 			    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5453 				chk = at;
5454 				continue;
5455 			}
5456 			if (chk->rec.data.stream_seq == seq) {
5457 				/* It needs to be tossed */
5458 				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5459 				if (compare_with_wrap(chk->rec.data.TSN_seq,
5460 				    asoc->tsn_last_delivered, MAX_TSN)) {
5461 					asoc->tsn_last_delivered =
5462 					    chk->rec.data.TSN_seq;
5463 					asoc->str_of_pdapi =
5464 					    chk->rec.data.stream_number;
5465 					asoc->ssn_of_pdapi =
5466 					    chk->rec.data.stream_seq;
5467 					asoc->fragment_flags =
5468 					    chk->rec.data.rcv_flags;
5469 				}
5470 				asoc->size_on_reasm_queue -= chk->send_size;
5471 				sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5472 
5473 				/* Clear up any stream problem */
5474 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5475 				    SCTP_DATA_UNORDERED &&
5476 				    (compare_with_wrap(chk->rec.data.stream_seq,
5477 				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
5478 				    MAX_SEQ))) {
5479 					/*
5480 					 * We must move this stream's
5481 					 * sequence number forward when the
5482 					 * chunk being skipped is not
5483 					 * unordered. There is a chance that
5484 					 * if the peer does not include the
5485 					 * last fragment in its FWD-TSN, we
5486 					 * WILL have a problem here, since a
5487 					 * partial chunk could sit in the
5488 					 * queue and never become
5489 					 * deliverable. Also, if a partial
5490 					 * delivery API has started, the
5491 					 * user may get a partial chunk,
5492 					 * with the next read returning a
5493 					 * new chunk... really ugly, but I
5494 					 * see no way around it! Maybe a notify??
5495 					 */
5496 					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5497 					    chk->rec.data.stream_seq;
5498 				}
5499 				if (chk->data) {
5500 					sctp_m_freem(chk->data);
5501 					chk->data = NULL;
5502 				}
5503 				sctp_free_a_chunk(stcb, chk);
5504 			} else if (compare_with_wrap(chk->rec.data.stream_seq, seq, MAX_SEQ)) {
5505 				/*
5506 				 * If the stream_seq is > than the purging
5507 				 * one, we are done
5508 				 */
5509 				break;
5510 			}
5511 			chk = at;
5512 		}
5513 	}
5514 }
5515 
5516 
5517 void
5518 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5519     struct sctp_forward_tsn_chunk *fwd,
5520     int *abort_flag, struct mbuf *m, int offset)
5521 {
5522 	/*
5523 	 * ISSUES that MUST be fixed for ECN! When we are the sender of the
5524 	 * forward TSN, when the SACK comes back that acknowledges the
5525 	 * FWD-TSN we must reset the NONCE sum to match correctly. This will
5526 	 * get quite tricky since we may have sent more data intervening
5527 	 * and must carefully account for what the SACK says on the nonce
5528 	 * and any gaps that are reported. This work will NOT be done here,
5529 	 * but I note it here since it is really related to PR-SCTP and
5530 	 * FWD-TSN's
5531 	 */
5532 
5533 	/* The pr-sctp fwd tsn */
5534 	/*
5535 	 * here we will perform all the data receiver side steps for
5536 	 * processing FwdTSN, as required by the pr-sctp draft:
5537 	 *
5538 	 * Assume we get FwdTSN(x):
5539 	 *
5540 	 * 1) update local cumTSN to x; 2) try to further advance cumTSN to
5541 	 * x plus others we have; 3) examine and update the re-ordering
5542 	 * queues on pr-in-streams; 4) clean up the re-assembly queue;
5543 	 * 5) send a SACK to report where we are.
5544 	 */
5545 	struct sctp_association *asoc;
5546 	uint32_t new_cum_tsn, tsn, gap;
5547 	unsigned int i, fwd_sz, cumack_set_flag, m_size, fnd = 0;
5548 	uint32_t str_seq;
5549 	struct sctp_stream_in *strm;
5550 	struct sctp_tmit_chunk *chk, *at;
5551 	struct sctp_queued_to_read *ctl, *sv;
5552 
5553 	cumack_set_flag = 0;
5554 	asoc = &stcb->asoc;
5555 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5556 		SCTPDBG(SCTP_DEBUG_INDATA1,
5557 		    "Bad size (too small/big) fwd-tsn\n");
5558 		return;
5559 	}
5560 	m_size = (stcb->asoc.mapping_array_size << 3);
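	/*
	 * mapping_array_size is in bytes; shifting left by 3 converts it
	 * to bits, i.e. the number of TSNs the map covers (e.g. a 64-byte
	 * array covers 512 TSNs).
	 */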
5561 	/*************************************************************/
5562 	/* 1. Here we update local cumTSN and shift the bitmap array */
5563 	/*************************************************************/
5564 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5565 
5566 	if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
5567 	    asoc->cumulative_tsn == new_cum_tsn) {
5568 		/* Already got there ... */
5569 		return;
5570 	}
5571 	if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
5572 	    MAX_TSN)) {
5573 		asoc->highest_tsn_inside_map = new_cum_tsn;
5574 
5575 	}
5576 	if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_nr_map,
5577 	    MAX_TSN)) {
5578 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5579 	}
5580 	/*
5581 	 * now we know the new TSN is more advanced, let's find the actual
5582 	 * gap
5583 	 */
5584 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
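	/*
	 * Worked example (hypothetical values): with
	 * mapping_array_base_tsn = 100 and new_cum_tsn = 105, the gap
	 * works out to 5, the offset of the new cumulative TSN within the
	 * mapping bitmap.
	 */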
5585 	asoc->cumulative_tsn = new_cum_tsn;
5586 	if (gap >= m_size) {
5587 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5588 			struct mbuf *oper;
5589 
5590 			/*
5591 			 * Out of range, even for single-byte chunks within
5592 			 * the rwnd we give out; this must be an attacker.
5593 			 */
5594 			*abort_flag = 1;
5595 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5596 			    0, M_DONTWAIT, 1, MT_DATA);
5597 			if (oper) {
5598 				struct sctp_paramhdr *ph;
5599 				uint32_t *ippp;
5600 
5601 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5602 				    (sizeof(uint32_t) * 3);
5603 				ph = mtod(oper, struct sctp_paramhdr *);
5604 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5605 				ph->param_length = htons(SCTP_BUF_LEN(oper));
5606 				ippp = (uint32_t *) (ph + 1);
5607 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5608 				ippp++;
5609 				*ippp = asoc->highest_tsn_inside_map;
5610 				ippp++;
5611 				*ippp = new_cum_tsn;
5612 			}
5613 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5614 			sctp_abort_an_association(stcb->sctp_ep, stcb,
5615 			    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
5616 			return;
5617 		}
5618 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5619 
5620 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5621 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5622 		asoc->highest_tsn_inside_map = new_cum_tsn;
5623 
5624 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5625 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5626 
5627 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5628 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5629 		}
5630 		asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
5631 	} else {
5632 		SCTP_TCB_LOCK_ASSERT(stcb);
5633 		for (i = 0; i <= gap; i++) {
5634 			SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, i);
5635 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5636 			/* FIX ME add something to set up highest TSN in map */
5637 		}
5638 		if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
5639 			asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5640 		}
5641 		if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map, MAX_TSN) ||
5642 		    new_cum_tsn == asoc->highest_tsn_inside_map) {
5643 			/* We must back down to see what the new highest is */
5644 			for (tsn = new_cum_tsn; (compare_with_wrap(tsn, asoc->mapping_array_base_tsn, MAX_TSN) ||
5645 			    (tsn == asoc->mapping_array_base_tsn)); tsn--) {
5646 				SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
5647 				if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
5648 					asoc->highest_tsn_inside_map = tsn;
5649 					fnd = 1;
5650 					break;
5651 				}
5652 			}
5653 			if (!fnd) {
5654 				asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
5655 			}
5656 		}
5657 		/*
5658 		 * Now, after marking all, slide things forward but send
5659 		 * no SACK please.
5660 		 */
5661 		sctp_slide_mapping_arrays(stcb);
5662 	}
5663 	/*************************************************************/
5664 	/* 2. Clear up re-assembly queue                             */
5665 	/*************************************************************/
5666 	/*
5667 	 * First service it if the pd-api is active, just in case we can
5668 	 * progress it forward.
5669 	 */
5670 	if (asoc->fragmented_delivery_inprogress) {
5671 		sctp_service_reassembly(stcb, asoc);
5672 	}
5673 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5674 		/* For each one on here see if we need to toss it */
5675 		/*
5676 		 * For now large messages held on the reasmqueue that are
5677 		 * complete will be tossed too. We could in theory do more
5678 		 * work to spin through and stop after dumping one msg aka
5679 		 * seeing the start of a new msg at the head, and call the
5680 		 * delivery function... to see if it can be delivered... But
5681 		 * for now we just dump everything on the queue.
5682 		 */
5683 		chk = TAILQ_FIRST(&asoc->reasmqueue);
5684 		while (chk) {
5685 			at = TAILQ_NEXT(chk, sctp_next);
5686 			if ((compare_with_wrap(new_cum_tsn,
5687 			    chk->rec.data.TSN_seq, MAX_TSN)) ||
5688 			    (new_cum_tsn == chk->rec.data.TSN_seq)) {
5689 				/* It needs to be tossed */
5690 				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5691 				if (compare_with_wrap(chk->rec.data.TSN_seq,
5692 				    asoc->tsn_last_delivered, MAX_TSN)) {
5693 					asoc->tsn_last_delivered =
5694 					    chk->rec.data.TSN_seq;
5695 					asoc->str_of_pdapi =
5696 					    chk->rec.data.stream_number;
5697 					asoc->ssn_of_pdapi =
5698 					    chk->rec.data.stream_seq;
5699 					asoc->fragment_flags =
5700 					    chk->rec.data.rcv_flags;
5701 				}
5702 				asoc->size_on_reasm_queue -= chk->send_size;
5703 				sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5704 
5705 				/* Clear up any stream problem */
5706 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5707 				    SCTP_DATA_UNORDERED &&
5708 				    (compare_with_wrap(chk->rec.data.stream_seq,
5709 				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
5710 				    MAX_SEQ))) {
5711 					/*
5712 					 * We must move this stream's
5713 					 * sequence number forward when the
5714 					 * chunk being skipped is not
5715 					 * unordered. There is a chance that
5716 					 * if the peer does not include the
5717 					 * last fragment in its FWD-TSN, we
5718 					 * WILL have a problem here, since a
5719 					 * partial chunk could sit in the
5720 					 * queue and never become
5721 					 * deliverable. Also, if a partial
5722 					 * delivery API has started, the
5723 					 * user may get a partial chunk,
5724 					 * with the next read returning a
5725 					 * new chunk... really ugly, but I
5726 					 * see no way around it! Maybe a notify??
5727 					 */
5728 					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5729 					    chk->rec.data.stream_seq;
5730 				}
5731 				if (chk->data) {
5732 					sctp_m_freem(chk->data);
5733 					chk->data = NULL;
5734 				}
5735 				sctp_free_a_chunk(stcb, chk);
5736 			} else {
5737 				/*
5738 				 * Ok we have gone beyond the end of the
5739 				 * fwd-tsn's mark.
5740 				 */
5741 				break;
5742 			}
5743 			chk = at;
5744 		}
5745 	}
5746 	/*******************************************************/
5747 	/* 3. Update the PR-stream re-ordering queues and fix  */
5748 	/* delivery issues as needed.                           */
5749 	/*******************************************************/
5750 	fwd_sz -= sizeof(*fwd);
5751 	if (m && fwd_sz) {
5752 		/* New method. */
5753 		unsigned int num_str;
5754 		struct sctp_strseq *stseq, strseqbuf;
5755 
5756 		offset += sizeof(*fwd);
5757 
5758 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5759 		num_str = fwd_sz / sizeof(struct sctp_strseq);
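		/*
		 * Each sctp_strseq entry is a (stream, sequence) pair of
		 * 16-bit values; e.g. 12 bytes of data past the fixed
		 * header (assuming the usual 4-byte sctp_strseq) means the
		 * loop below walks 3 entries.
		 */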
5760 		for (i = 0; i < num_str; i++) {
5761 			uint16_t st;
5762 
5763 			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5764 			    sizeof(struct sctp_strseq),
5765 			    (uint8_t *) & strseqbuf);
5766 			offset += sizeof(struct sctp_strseq);
5767 			if (stseq == NULL) {
5768 				break;
5769 			}
5770 			/* Convert from network to host byte order. */
5771 			st = ntohs(stseq->stream);
5772 			stseq->stream = st;
5773 			st = ntohs(stseq->sequence);
5774 			stseq->sequence = st;
5775 
5776 			/* now process */
5777 
5778 			/*
5779 			 * Ok, we now look for the stream/seq on the read
5780 			 * queue where it is not all delivered. If we find
5781 			 * it, we transmute the read entry into a PDI_ABORTED.
5782 			 */
5783 			if (stseq->stream >= asoc->streamincnt) {
5784 				/* screwed up streams, stop!  */
5785 				break;
5786 			}
5787 			if ((asoc->str_of_pdapi == stseq->stream) &&
5788 			    (asoc->ssn_of_pdapi == stseq->sequence)) {
5789 				/*
5790 				 * If this is the one we were partially
5791 				 * delivering, then we no longer are.
5792 				 * Note this will change with the reassembly
5793 				 * re-write.
5794 				 */
5795 				asoc->fragmented_delivery_inprogress = 0;
5796 			}
5797 			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5798 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5799 				if ((ctl->sinfo_stream == stseq->stream) &&
5800 				    (ctl->sinfo_ssn == stseq->sequence)) {
5801 					str_seq = (stseq->stream << 16) | stseq->sequence;
5802 					ctl->end_added = 1;
5803 					ctl->pdapi_aborted = 1;
5804 					sv = stcb->asoc.control_pdapi;
5805 					stcb->asoc.control_pdapi = ctl;
5806 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5807 					    stcb,
5808 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5809 					    (void *)&str_seq,
5810 					    SCTP_SO_NOT_LOCKED);
5811 					stcb->asoc.control_pdapi = sv;
5812 					break;
5813 				} else if ((ctl->sinfo_stream == stseq->stream) &&
5814 				    (compare_with_wrap(ctl->sinfo_ssn, stseq->sequence, MAX_SEQ))) {
5815 					/* We are past our victim SSN */
5816 					break;
5817 				}
5818 			}
5819 			strm = &asoc->strmin[stseq->stream];
5820 			if (compare_with_wrap(stseq->sequence,
5821 			    strm->last_sequence_delivered, MAX_SEQ)) {
5822 				/* Update the sequence number */
5823 				strm->last_sequence_delivered =
5824 				    stseq->sequence;
5825 			}
5826 			/* now kick the stream the new way */
5827 			/* sa_ignore NO_NULL_CHK */
5828 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5829 		}
5830 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5831 	}
5832 	if (TAILQ_FIRST(&asoc->reasmqueue)) {
5833 		/* now lets kick out and check for more fragmented delivery */
5834 		/* sa_ignore NO_NULL_CHK */
5835 		sctp_deliver_reasm_check(stcb, &stcb->asoc);
5836 	}
5837 }
5838