xref: /freebsd/sys/netinet/sctp_indata.c (revision aa79fe245de7616cda41b69a296a5ce209c95c45)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *    this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *    the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 
48 #define SCTP_CALC_TSN_TO_GAP(gap, tsn, mapping_tsn) do { \
49 					if ((compare_with_wrap(tsn, mapping_tsn, MAX_TSN)) || \
50 					    (tsn == mapping_tsn)) { \
51 						gap = tsn - mapping_tsn; \
52 					} else { \
53 						gap = (MAX_TSN - mapping_tsn) + tsn + 1; \
54 					} \
55 				} while (0)
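
/*
 * Worked example (assuming MAX_TSN is 0xffffffff): with a mapping base of
 * mapping_tsn = 0xfffffffe and an arriving tsn = 0x00000001, the TSN has
 * wrapped, so the macro takes the else branch and yields
 * gap = (0xffffffff - 0xfffffffe) + 0x1 + 1 = 3; this TSN sits three
 * slots above the base of the mapping array.
 */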
56 
57 #define SCTP_REVERSE_OUT_TSN_PRES(nr_gap, tsn, asoc) do { \
58 					if (asoc->mapping_array_base_tsn == asoc->nr_mapping_array_base_tsn) { \
59 						SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, nr_gap); \
60 					} else { \
61 						int lgap; \
62 						SCTP_CALC_TSN_TO_GAP(lgap, tsn, asoc->mapping_array_base_tsn); \
63 						SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, lgap); \
64 					} \
65 				} while (0)
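
/*
 * SCTP_REVERSE_OUT_TSN_PRES clears the bit for tsn in the regular mapping
 * array once that TSN has been marked in the nr mapping array. When both
 * arrays share a base TSN the caller's nr_gap can be reused directly;
 * otherwise the offset must be recomputed against mapping_array_base_tsn.
 */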
66 
67 /*
68  * NOTES: On the outbound side of things I need to check the sack timer to
69  * see if I should generate a sack into the chunk queue (if I have data to
70  * send, that is), since the sack will be bundled with that data.
71  *
72  * The callback in sctp_usrreq.c will get called when the socket is read from.
73  * This will cause sctp_service_queues() to get called on the top entry in
74  * the list.
75  */
76 
77 void
78 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
79 {
80 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
81 }
82 
83 /* Calculate what the rwnd would be */
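/*
 * In effect: rwnd = sbspace(so_rcv) - size_on_reasm_queue -
 * size_on_all_streams - my_rwnd_control_len. With nothing queued at all
 * the full socket buffer limit is granted; when the control overhead
 * dominates, the result is pinned to 1 so that SWS avoidance stays
 * engaged.
 */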
84 uint32_t
85 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
86 {
87 	uint32_t calc = 0;
88 
89 	/*
90 	 * This is really set wrong with respect to a 1-to-many socket, since
91 	 * the sb_cc is the count that everyone has put up, not just this
92 	 * association. When we re-write sctp_soreceive we will fix this so
93 	 * that ONLY this association's data is taken into account.
94 	 */
95 	if (stcb->sctp_socket == NULL)
96 		return (calc);
97 
98 	if (stcb->asoc.sb_cc == 0 &&
99 	    asoc->size_on_reasm_queue == 0 &&
100 	    asoc->size_on_all_streams == 0) {
101 		/* Full rwnd granted */
102 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
103 		return (calc);
104 	}
105 	/* get actual space */
106 	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
107 
108 	/*
109 	 * take out what has NOT yet been put on the socket queue but that
110 	 * we still hold for delivery.
111 	 */
112 	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
113 	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);
114 
115 	if (calc == 0) {
116 		/* out of space */
117 		return (calc);
118 	}
119 	/* what is the overhead of all these rwnd's */
120 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
121 	/*
122 	 * If the window gets too small due to ctrl-stuff, force it to 1,
123 	 * even if it would be 0, so SWS avoidance stays engaged.
124 	 */
125 	if (calc < stcb->asoc.my_rwnd_control_len) {
126 		calc = 1;
127 	}
128 	return (calc);
129 }
130 
131 
132 
133 /*
134  * Build out our readq entry based on the incoming packet.
135  */
136 struct sctp_queued_to_read *
137 sctp_build_readq_entry(struct sctp_tcb *stcb,
138     struct sctp_nets *net,
139     uint32_t tsn, uint32_t ppid,
140     uint32_t context, uint16_t stream_no,
141     uint16_t stream_seq, uint8_t flags,
142     struct mbuf *dm)
143 {
144 	struct sctp_queued_to_read *read_queue_e = NULL;
145 
146 	sctp_alloc_a_readq(stcb, read_queue_e);
147 	if (read_queue_e == NULL) {
148 		goto failed_build;
149 	}
150 	read_queue_e->sinfo_stream = stream_no;
151 	read_queue_e->sinfo_ssn = stream_seq;
152 	read_queue_e->sinfo_flags = (flags << 8);
153 	read_queue_e->sinfo_ppid = ppid;
154 	read_queue_e->sinfo_context = stcb->asoc.context;
155 	read_queue_e->sinfo_timetolive = 0;
156 	read_queue_e->sinfo_tsn = tsn;
157 	read_queue_e->sinfo_cumtsn = tsn;
158 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
159 	read_queue_e->whoFrom = net;
160 	read_queue_e->length = 0;
161 	atomic_add_int(&net->ref_count, 1);
162 	read_queue_e->data = dm;
163 	read_queue_e->spec_flags = 0;
164 	read_queue_e->tail_mbuf = NULL;
165 	read_queue_e->aux_data = NULL;
166 	read_queue_e->stcb = stcb;
167 	read_queue_e->port_from = stcb->rport;
168 	read_queue_e->do_not_ref_stcb = 0;
169 	read_queue_e->end_added = 0;
170 	read_queue_e->some_taken = 0;
171 	read_queue_e->pdapi_aborted = 0;
172 failed_build:
173 	return (read_queue_e);
174 }
175 
176 
177 /*
178  * Build out our readq entry based on the incoming packet.
179  */
180 static struct sctp_queued_to_read *
181 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
182     struct sctp_tmit_chunk *chk)
183 {
184 	struct sctp_queued_to_read *read_queue_e = NULL;
185 
186 	sctp_alloc_a_readq(stcb, read_queue_e);
187 	if (read_queue_e == NULL) {
188 		goto failed_build;
189 	}
190 	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
191 	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
192 	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
193 	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
194 	read_queue_e->sinfo_context = stcb->asoc.context;
195 	read_queue_e->sinfo_timetolive = 0;
196 	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
197 	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
198 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
199 	read_queue_e->whoFrom = chk->whoTo;
200 	read_queue_e->aux_data = NULL;
201 	read_queue_e->length = 0;
202 	atomic_add_int(&chk->whoTo->ref_count, 1);
203 	read_queue_e->data = chk->data;
204 	read_queue_e->tail_mbuf = NULL;
205 	read_queue_e->stcb = stcb;
206 	read_queue_e->port_from = stcb->rport;
207 	read_queue_e->spec_flags = 0;
208 	read_queue_e->do_not_ref_stcb = 0;
209 	read_queue_e->end_added = 0;
210 	read_queue_e->some_taken = 0;
211 	read_queue_e->pdapi_aborted = 0;
212 failed_build:
213 	return (read_queue_e);
214 }
215 
216 
217 struct mbuf *
218 sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
219     struct sctp_sndrcvinfo *sinfo)
220 {
221 	struct sctp_sndrcvinfo *outinfo;
222 	struct cmsghdr *cmh;
223 	struct mbuf *ret;
224 	int len;
225 	int use_extended = 0;
226 
227 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
228 		/* user does not want the sndrcv ctl */
229 		return (NULL);
230 	}
231 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
232 		use_extended = 1;
233 		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
234 	} else {
235 		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
236 	}
237 
238 
239 	ret = sctp_get_mbuf_for_msg(len,
240 	    0, M_DONTWAIT, 1, MT_DATA);
241 
242 	if (ret == NULL) {
243 		/* No space */
244 		return (ret);
245 	}
246 	/* We need a CMSG header followed by the struct  */
247 	cmh = mtod(ret, struct cmsghdr *);
248 	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
249 	cmh->cmsg_level = IPPROTO_SCTP;
250 	if (use_extended) {
251 		cmh->cmsg_type = SCTP_EXTRCV;
252 		cmh->cmsg_len = len;
253 		memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));	/* not len: len also counts the cmsg header */
254 	} else {
255 		cmh->cmsg_type = SCTP_SNDRCV;
256 		cmh->cmsg_len = len;
257 		*outinfo = *sinfo;
258 	}
259 	SCTP_BUF_LEN(ret) = cmh->cmsg_len;
260 	return (ret);
261 }
262 
263 
264 char *
265 sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
266     int *control_len,
267     struct sctp_sndrcvinfo *sinfo)
268 {
269 	struct sctp_sndrcvinfo *outinfo;
270 	struct cmsghdr *cmh;
271 	char *buf;
272 	int len;
273 	int use_extended = 0;
274 
275 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
276 		/* user does not want the sndrcv ctl */
277 		return (NULL);
278 	}
279 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
280 		use_extended = 1;
281 		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
282 	} else {
283 		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
284 	}
285 	SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
286 	if (buf == NULL) {
287 		/* No space */
288 		return (buf);
289 	}
290 	/* We need a CMSG header followed by the struct  */
291 	cmh = (struct cmsghdr *)buf;
292 	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
293 	cmh->cmsg_level = IPPROTO_SCTP;
294 	if (use_extended) {
295 		cmh->cmsg_type = SCTP_EXTRCV;
296 		cmh->cmsg_len = len;
297 		memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));	/* not len: len also counts the cmsg header */
298 	} else {
299 		cmh->cmsg_type = SCTP_SNDRCV;
300 		cmh->cmsg_len = len;
301 		*outinfo = *sinfo;
302 	}
303 	*control_len = len;
304 	return (buf);
305 }
306 
307 
308 /*
309  * We are delivering currently from the reassembly queue. We must continue to
310  * deliver until we either: 1) run out of space, 2) run out of sequential
311  * TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
312  */
313 static void
314 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
315 {
316 	struct sctp_tmit_chunk *chk;
317 	uint16_t nxt_todel;
318 	uint16_t stream_no;
319 	int end = 0;
320 	int cntDel;
321 
322 	/* EY if any out-of-order delivered, then tag it nr on nr_map */
323 	uint32_t nr_tsn, nr_gap;
324 
325 	struct sctp_queued_to_read *control, *ctl, *ctlat;
326 
327 	if (stcb == NULL)
328 		return;
329 
330 	cntDel = stream_no = 0;
331 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
332 	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
333 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
334 		/* socket above is long gone or going.. */
335 abandon:
336 		asoc->fragmented_delivery_inprogress = 0;
337 		chk = TAILQ_FIRST(&asoc->reasmqueue);
338 		while (chk) {
339 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
340 			asoc->size_on_reasm_queue -= chk->send_size;
341 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
342 			/*
343 			 * Lose the data pointer, since it's in the socket
344 			 * buffer
345 			 */
346 			if (chk->data) {
347 				sctp_m_freem(chk->data);
348 				chk->data = NULL;
349 			}
350 			/* Now free the address and data */
351 			sctp_free_a_chunk(stcb, chk);
352 			/* sa_ignore FREED_MEMORY */
353 			chk = TAILQ_FIRST(&asoc->reasmqueue);
354 		}
355 		return;
356 	}
357 	SCTP_TCB_LOCK_ASSERT(stcb);
358 	do {
359 		chk = TAILQ_FIRST(&asoc->reasmqueue);
360 		if (chk == NULL) {
361 			return;
362 		}
363 		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
364 			/* Can't deliver more :< */
365 			return;
366 		}
367 		stream_no = chk->rec.data.stream_number;
368 		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
369 		if (nxt_todel != chk->rec.data.stream_seq &&
370 		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
371 			/*
372 			 * Not the next sequence to deliver in its stream
373 			 * and not unordered, so it cannot go up yet.
374 			 */
375 			return;
376 		}
377 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
378 
379 			control = sctp_build_readq_entry_chk(stcb, chk);
380 			if (control == NULL) {
381 				/* out of memory? */
382 				return;
383 			}
384 			/* save it off for our future deliveries */
385 			stcb->asoc.control_pdapi = control;
386 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
387 				end = 1;
388 			else
389 				end = 0;
390 			sctp_add_to_readq(stcb->sctp_ep,
391 			    stcb, control, &stcb->sctp_socket->so_rcv, end, SCTP_SO_NOT_LOCKED);
392 			cntDel++;
393 		} else {
394 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
395 				end = 1;
396 			else
397 				end = 0;
398 			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
399 			    stcb->asoc.control_pdapi,
400 			    chk->data, end, chk->rec.data.TSN_seq,
401 			    &stcb->sctp_socket->so_rcv)) {
402 				/*
403 				 * something is very wrong, either
404 				 * control_pdapi is NULL, or the tail_mbuf
405 				 * is corrupt, or there is an EOM already on
406 				 * the mbuf chain.
407 				 */
408 				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
409 					goto abandon;
410 				} else {
411 #ifdef INVARIANTS
412 					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
413 						panic("This should not happen control_pdapi NULL?");
414 					}
415 					/* if we did not panic, it was an EOM */
416 					panic("Bad chunking ??");
417 #else
418 					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
419 						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
420 					}
421 					SCTP_PRINTF("Bad chunking ??\n");
422 					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
423 
424 #endif
425 					goto abandon;
426 				}
427 			}
428 			cntDel++;
429 		}
430 		/* pull it we did it */
431 		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
432 		/*
433 		 * EY: this is the chunk (chk->rec.data.TSN_seq) that should
434 		 * be tagged NR: calculate the gap and then mark this TSN in
435 		 * the nr mapping array.
436 		 */
437 		/*
438 		 * EY TODO: this TSN should be tagged NR only if it is
439 		 * out-of-order; the if statement should be modified.
440 		 */
441 		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
442 
443 			nr_tsn = chk->rec.data.TSN_seq;
444 			SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
445 			if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
446 			    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
447 				/*
448 				 * EY: the 1st should never happen, since
449 				 * that check is already done in
450 				 * sctp_process_a_data_chunk().
451 				 */
452 				/*
453 				 * EY: the 2nd should never happen, because
454 				 * nr_mapping_array is always expanded when
455 				 * mapping_array is expanded.
456 				 */
457 				SCTP_PRINTF("Impossible nr_gap ack range failed\n");
458 			} else {
459 				SCTP_TCB_LOCK_ASSERT(stcb);
460 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
461 				SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
462 				if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
463 					asoc->highest_tsn_inside_nr_map = nr_tsn;
464 			}
465 		}
466 		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
467 			asoc->fragmented_delivery_inprogress = 0;
468 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
469 				asoc->strmin[stream_no].last_sequence_delivered++;
470 			}
471 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
472 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
473 			}
474 		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
475 			/*
476 			 * turn the flag back on since we just delivered
477 			 * yet another one.
478 			 */
479 			asoc->fragmented_delivery_inprogress = 1;
480 		}
481 		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
482 		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
483 		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
484 		asoc->last_strm_no_delivered = chk->rec.data.stream_number;
485 
486 		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
487 		asoc->size_on_reasm_queue -= chk->send_size;
488 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
489 		/* free up the chk */
490 		chk->data = NULL;
491 		sctp_free_a_chunk(stcb, chk);
492 
493 		if (asoc->fragmented_delivery_inprogress == 0) {
494 			/*
495 			 * Now lets see if we can deliver the next one on
496 			 * the stream
497 			 */
498 			struct sctp_stream_in *strm;
499 
500 			strm = &asoc->strmin[stream_no];
501 			nxt_todel = strm->last_sequence_delivered + 1;
502 			ctl = TAILQ_FIRST(&strm->inqueue);
503 			if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
504 				while (ctl != NULL) {
505 					/* Deliver more if we can. */
506 					if (nxt_todel == ctl->sinfo_ssn) {
507 						ctlat = TAILQ_NEXT(ctl, next);
508 						TAILQ_REMOVE(&strm->inqueue, ctl, next);
509 						asoc->size_on_all_streams -= ctl->length;
510 						sctp_ucount_decr(asoc->cnt_on_all_streams);
511 						strm->last_sequence_delivered++;
512 						/*
513 						 * EY will be used to
514 						 * calculate nr-gap
515 						 */
516 						nr_tsn = ctl->sinfo_tsn;
517 						sctp_add_to_readq(stcb->sctp_ep, stcb,
518 						    ctl,
519 						    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
520 						/*
521 						 * EY -now something is
522 						 * delivered, calculate
523 						 * nr_gap and tag this tsn
524 						 * NR
525 						 */
526 						if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
527 							SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
528 							if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
529 							    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
530 								SCTP_PRINTF("Impossible NR gap calculation?\n");
531 								/*
532 								 * EY: the 1st should never
533 								 * happen, since that check
534 								 * is already done in
535 								 * sctp_process_a_data_chunk().
536 								 */
537 								/*
538 								 * EY: the 2nd should never
539 								 * happen, because
540 								 * nr_mapping_array is always
541 								 * expanded when mapping_array
542 								 * is expanded.
543 								 */
562 							} else {
563 								SCTP_TCB_LOCK_ASSERT(stcb);
564 								SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
565 								SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
566 								if (compare_with_wrap(nr_tsn,
567 								    asoc->highest_tsn_inside_nr_map,
568 								    MAX_TSN))
569 									asoc->highest_tsn_inside_nr_map = nr_tsn;
570 							}
571 						}
572 						ctl = ctlat;
573 					} else {
574 						break;
575 					}
576 					nxt_todel = strm->last_sequence_delivered + 1;
577 				}
578 			}
579 			break;
580 		}
581 		/* sa_ignore FREED_MEMORY */
582 		chk = TAILQ_FIRST(&asoc->reasmqueue);
583 	} while (chk);
584 }
585 
586 /*
587  * Queue the chunk either right into the socket buffer if it is the next one
588  * to go OR put it in the correct place in the delivery queue.  If we do
589  * append to the so_buf, keep doing so until we are out of order. One big
590  * question still remains, what to do when the socket buffer is FULL??
591  */
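/*
 * Example: with last_sequence_delivered = 4, an arriving ssn 5 goes
 * straight to the socket buffer (followed by any queued 6, 7, ...),
 * while an arriving ssn 7 is instead inserted into the stream's inqueue
 * in SSN order until 5 and 6 show up.
 */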
592 static void
593 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
594     struct sctp_queued_to_read *control, int *abort_flag)
595 {
596 	/*
597 	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
598 	 * all the data in one stream this could happen quite rapidly. One
599 	 * could use the TSN to keep track of things, but this scheme breaks
600 	 * down in the other type of stream usage that could occur. Send a
601 	 * single msg to stream 0, send 4 billion messages to stream 1, now
602 	 * send a message to stream 0. You have a situation where the TSN
603 	 * has wrapped but not in the stream. Is this worth worrying about
604 	 * or should we just change our queue sort at the bottom to be by
605 	 * TSN?
606 	 *
607 	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
608 	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
609 	 * assignment this could happen... and I don't see how this would be
610 	 * a violation. So for now I am undecided and will leave the sort by
611 	 * SSN alone. Maybe a hybrid approach is the answer.
612 	 *
613 	 */
614 	struct sctp_stream_in *strm;
615 	struct sctp_queued_to_read *at;
616 	int queue_needed;
617 	uint16_t nxt_todel;
618 	struct mbuf *oper;
619 
620 	/* EY- will be used to calculate nr-gap for a tsn */
621 	uint32_t nr_tsn, nr_gap;
622 
623 	queue_needed = 1;
624 	asoc->size_on_all_streams += control->length;
625 	sctp_ucount_incr(asoc->cnt_on_all_streams);
626 	strm = &asoc->strmin[control->sinfo_stream];
627 	nxt_todel = strm->last_sequence_delivered + 1;
628 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
629 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
630 	}
631 	SCTPDBG(SCTP_DEBUG_INDATA1,
632 	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
633 	    (uint32_t) control->sinfo_stream,
634 	    (uint32_t) strm->last_sequence_delivered,
635 	    (uint32_t) nxt_todel);
636 	if (compare_with_wrap(strm->last_sequence_delivered,
637 	    control->sinfo_ssn, MAX_SEQ) ||
638 	    (strm->last_sequence_delivered == control->sinfo_ssn)) {
639 		/* The incoming sseq is behind where we last delivered? */
640 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
641 		    control->sinfo_ssn, strm->last_sequence_delivered);
642 protocol_error:
643 		/*
644 		 * throw it in the stream so it gets cleaned up in
645 		 * association destruction
646 		 */
647 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
648 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
649 		    0, M_DONTWAIT, 1, MT_DATA);
650 		if (oper) {
651 			struct sctp_paramhdr *ph;
652 			uint32_t *ippp;
653 
654 			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
655 			    (sizeof(uint32_t) * 3);
656 			ph = mtod(oper, struct sctp_paramhdr *);
657 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
658 			ph->param_length = htons(SCTP_BUF_LEN(oper));
659 			ippp = (uint32_t *) (ph + 1);
660 			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
661 			ippp++;
662 			*ippp = control->sinfo_tsn;
663 			ippp++;
664 			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
665 		}
666 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
667 		sctp_abort_an_association(stcb->sctp_ep, stcb,
668 		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
669 
670 		*abort_flag = 1;
671 		return;
672 
673 	}
674 	if (nxt_todel == control->sinfo_ssn) {
675 		/* can be delivered right away? */
676 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
677 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
678 		}
679 		/* EY: it won't be queued since it can be delivered directly */
680 		queue_needed = 0;
681 		asoc->size_on_all_streams -= control->length;
682 		sctp_ucount_decr(asoc->cnt_on_all_streams);
683 		strm->last_sequence_delivered++;
684 		/* EY will be used to calculate nr-gap */
685 		nr_tsn = control->sinfo_tsn;
686 		sctp_add_to_readq(stcb->sctp_ep, stcb,
687 		    control,
688 		    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
689 
690 		/*
691 		 * EY: this is the chunk (control->sinfo_tsn) that should be
692 		 * tagged NR: calculate the gap and then mark this TSN in
693 		 * the nr mapping array.
694 		 */
695 		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
696 			SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
697 			if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
698 			    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
699 				SCTP_PRINTF("Impossible nr_tsn set 2?\n");
700 				/*
701 				 * EY: the 1st should never happen, since
702 				 * that check is already done in
703 				 * sctp_process_a_data_chunk().
704 				 */
705 				/*
706 				 * EY: the 2nd should never happen, because
707 				 * nr_mapping_array is always expanded
708 				 * when mapping_array is expanded.
709 				 */
710 			} else {
711 				SCTP_TCB_LOCK_ASSERT(stcb);
712 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
713 				SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
714 				if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
715 					asoc->highest_tsn_inside_nr_map = nr_tsn;
716 			}
717 		}
718 		control = TAILQ_FIRST(&strm->inqueue);
719 		while (control != NULL) {
720 			/* all delivered */
721 			nxt_todel = strm->last_sequence_delivered + 1;
722 			if (nxt_todel == control->sinfo_ssn) {
723 				at = TAILQ_NEXT(control, next);
724 				TAILQ_REMOVE(&strm->inqueue, control, next);
725 				asoc->size_on_all_streams -= control->length;
726 				sctp_ucount_decr(asoc->cnt_on_all_streams);
727 				strm->last_sequence_delivered++;
728 				/*
729 				 * We ignore the return of deliver_data here
730 				 * since we always can hold the chunk on the
731 				 * d-queue. And we have a finite number that
732 				 * can be delivered from the strq.
733 				 */
734 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
735 					sctp_log_strm_del(control, NULL,
736 					    SCTP_STR_LOG_FROM_IMMED_DEL);
737 				}
738 				/* EY will be used to calculate nr-gap */
739 				nr_tsn = control->sinfo_tsn;
740 				sctp_add_to_readq(stcb->sctp_ep, stcb,
741 				    control,
742 				    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
743 				/*
744 				 * EY: this is the chunk (control->sinfo_tsn)
745 				 * that should be tagged NR: calculate the
746 				 * gap and then mark this TSN in the nr
747 				 * mapping array.
748 				 */
749 				if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
750 					SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
751 					if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
752 					    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
753 						SCTP_PRINTF("Impossible nr TSN set 3?\n");
754 						/*
755 						 * EY: the 1st should never
756 						 * happen, since that check
757 						 * is already done in
758 						 * sctp_process_a_data_chunk().
759 						 */
760 						/*
761 						 * EY: the 2nd should never
762 						 * happen, because
763 						 * nr_mapping_array is
764 						 * always expanded when
765 						 * mapping_array is
766 						 * expanded.
767 						 */
768 					} else {
769 						SCTP_TCB_LOCK_ASSERT(stcb);
770 						SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
771 						SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
772 						if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map,
773 						    MAX_TSN))
774 							asoc->highest_tsn_inside_nr_map = nr_tsn;
775 					}
776 				}
777 				control = at;
778 				continue;
779 			}
780 			break;
781 		}
782 	}
783 	if (queue_needed) {
784 		/*
785 		 * Ok, we did not deliver this guy, find the correct place
786 		 * to put it on the queue.
787 		 */
788 		if ((compare_with_wrap(asoc->cumulative_tsn,
789 		    control->sinfo_tsn, MAX_TSN)) ||
790 		    (control->sinfo_tsn == asoc->cumulative_tsn)) {
791 			goto protocol_error;
792 		}
793 		if (TAILQ_EMPTY(&strm->inqueue)) {
794 			/* Empty queue */
795 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
796 				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
797 			}
798 			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
799 		} else {
800 			TAILQ_FOREACH(at, &strm->inqueue, next) {
801 				if (compare_with_wrap(at->sinfo_ssn,
802 				    control->sinfo_ssn, MAX_SEQ)) {
803 					/*
804 					 * one in queue is bigger than the
805 					 * new one, insert before this one
806 					 */
807 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
808 						sctp_log_strm_del(control, at,
809 						    SCTP_STR_LOG_FROM_INSERT_MD);
810 					}
811 					TAILQ_INSERT_BEFORE(at, control, next);
812 					break;
813 				} else if (at->sinfo_ssn == control->sinfo_ssn) {
814 					/*
815 					 * Gak, He sent me a duplicate str
816 					 * seq number
817 					 */
818 					/*
819 					 * foo bar, I guess I will just free
820 					 * this new guy, should we abort
821 					 * too? FIX ME MAYBE? Or it COULD be
822 					 * that the SSN's have wrapped.
823 					 * Maybe I should compare to TSN
824 					 * somehow... sigh for now just blow
825 					 * away the chunk!
826 					 */
827 
828 					if (control->data)
829 						sctp_m_freem(control->data);
830 					control->data = NULL;
831 					asoc->size_on_all_streams -= control->length;
832 					sctp_ucount_decr(asoc->cnt_on_all_streams);
833 					if (control->whoFrom)
834 						sctp_free_remote_addr(control->whoFrom);
835 					control->whoFrom = NULL;
836 					sctp_free_a_readq(stcb, control);
837 					return;
838 				} else {
839 					if (TAILQ_NEXT(at, next) == NULL) {
840 						/*
841 						 * We are at the end, insert
842 						 * it after this one
843 						 */
844 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
845 							sctp_log_strm_del(control, at,
846 							    SCTP_STR_LOG_FROM_INSERT_TL);
847 						}
848 						TAILQ_INSERT_AFTER(&strm->inqueue,
849 						    at, control, next);
850 						break;
851 					}
852 				}
853 			}
854 		}
855 	}
856 }
857 
858 /*
859  * Returns two things: you get the total size of the deliverable parts of the
860  * first fragmented message on the reassembly queue, and you get a 1 back if
861  * all of the message is ready or a 0 back if the message is still incomplete.
862  */
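/*
 * Example: with FIRST (tsn 10), MIDDLE (11) and LAST (12) all queued,
 * this returns 1 with *t_size set to the sum of the three send_sizes;
 * if LAST (12) has not arrived yet, it returns 0 with the partial size.
 */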
863 static int
864 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
865 {
866 	struct sctp_tmit_chunk *chk;
867 	uint32_t tsn;
868 
869 	*t_size = 0;
870 	chk = TAILQ_FIRST(&asoc->reasmqueue);
871 	if (chk == NULL) {
872 		/* nothing on the queue */
873 		return (0);
874 	}
875 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
876 		/* Not a first on the queue */
877 		return (0);
878 	}
879 	tsn = chk->rec.data.TSN_seq;
880 	while (chk) {
881 		if (tsn != chk->rec.data.TSN_seq) {
882 			return (0);
883 		}
884 		*t_size += chk->send_size;
885 		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
886 			return (1);
887 		}
888 		tsn++;
889 		chk = TAILQ_NEXT(chk, sctp_next);
890 	}
891 	return (0);
892 }
893 
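/*
 * Kick off, or continue, delivery from the reassembly queue. A
 * fragmented message starts delivery only once its FIRST fragment is
 * next in stream order (or unordered) and either the whole message is
 * present or at least partial_delivery_point bytes of it are.
 */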
894 static void
895 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
896 {
897 	struct sctp_tmit_chunk *chk;
898 	uint16_t nxt_todel;
899 	uint32_t tsize;
900 
901 doit_again:
902 	chk = TAILQ_FIRST(&asoc->reasmqueue);
903 	if (chk == NULL) {
904 		/* Huh? */
905 		asoc->size_on_reasm_queue = 0;
906 		asoc->cnt_on_reasm_queue = 0;
907 		return;
908 	}
909 	if (asoc->fragmented_delivery_inprogress == 0) {
910 		nxt_todel =
911 		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
912 		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
913 		    (nxt_todel == chk->rec.data.stream_seq ||
914 		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
915 			/*
916 			 * Yep the first one is here and it's ok to deliver,
917 			 * but should we?
918 			 */
919 			if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
920 			    (tsize >= stcb->sctp_ep->partial_delivery_point))) {
921 
922 				/*
923 				 * Yes, we set up to start reception by
924 				 * backing down the TSN, just in case we
925 				 * can't deliver.
926 				 */
927 				asoc->fragmented_delivery_inprogress = 1;
928 				asoc->tsn_last_delivered =
929 				    chk->rec.data.TSN_seq - 1;
930 				asoc->str_of_pdapi =
931 				    chk->rec.data.stream_number;
932 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
933 				asoc->pdapi_ppid = chk->rec.data.payloadtype;
934 				asoc->fragment_flags = chk->rec.data.rcv_flags;
935 				sctp_service_reassembly(stcb, asoc);
936 			}
937 		}
938 	} else {
939 		/*
940 		 * Service re-assembly will deliver stream data queued at
941 		 * the end of fragmented delivery... but it won't know to
942 		 * go back and call itself again... we do that here with
943 		 * the goto doit_again.
944 		 */
945 		sctp_service_reassembly(stcb, asoc);
946 		if (asoc->fragmented_delivery_inprogress == 0) {
947 			/*
948 			 * finished our Fragmented delivery, could be more
949 			 * waiting?
950 			 */
951 			goto doit_again;
952 		}
953 	}
954 }
955 
956 /*
957  * Dump onto the re-assembly queue, in its proper place. After dumping on the
958  * queue, see if anything can be delivered. If so pull it off (or as much as
959  * we can). If we run out of space then we must dump what we can and set the
960  * appropriate flag to say we queued what we could.
961  */
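/*
 * Example: fragments must tile as FIRST, MIDDLE ..., LAST on
 * consecutive TSNs. If TSN 10 (FIRST) and 11 (MIDDLE) are already
 * queued, an arriving TSN 12 must be a MIDDLE or LAST; a FIRST there is
 * a protocol violation and the association is aborted.
 */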
962 static void
963 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
964     struct sctp_tmit_chunk *chk, int *abort_flag)
965 {
966 	struct mbuf *oper;
967 	uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
968 	u_char last_flags;
969 	struct sctp_tmit_chunk *at, *prev, *next;
970 
971 	prev = next = NULL;
972 	cum_ackp1 = asoc->tsn_last_delivered + 1;
973 	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
974 		/* This is the first one on the queue */
975 		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
976 		/*
977 		 * we do not check for delivery of anything when only one
978 		 * fragment is here
979 		 */
980 		asoc->size_on_reasm_queue = chk->send_size;
981 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
982 		if (chk->rec.data.TSN_seq == cum_ackp1) {
983 			if (asoc->fragmented_delivery_inprogress == 0 &&
984 			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
985 			    SCTP_DATA_FIRST_FRAG) {
986 				/*
987 				 * An empty queue, no delivery inprogress,
988 				 * we hit the next one and it does NOT have
989 				 * a FIRST fragment mark.
990 				 */
991 				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
992 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
993 				    0, M_DONTWAIT, 1, MT_DATA);
994 
995 				if (oper) {
996 					struct sctp_paramhdr *ph;
997 					uint32_t *ippp;
998 
999 					SCTP_BUF_LEN(oper) =
1000 					    sizeof(struct sctp_paramhdr) +
1001 					    (sizeof(uint32_t) * 3);
1002 					ph = mtod(oper, struct sctp_paramhdr *);
1003 					ph->param_type =
1004 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1005 					ph->param_length = htons(SCTP_BUF_LEN(oper));
1006 					ippp = (uint32_t *) (ph + 1);
1007 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
1008 					ippp++;
1009 					*ippp = chk->rec.data.TSN_seq;
1010 					ippp++;
1011 					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1012 
1013 				}
1014 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
1015 				sctp_abort_an_association(stcb->sctp_ep, stcb,
1016 				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1017 				*abort_flag = 1;
1018 			} else if (asoc->fragmented_delivery_inprogress &&
1019 			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1020 				/*
1021 				 * We are doing a partial delivery and the
1022 				 * NEXT chunk MUST be either the LAST or
1023 				 * MIDDLE fragment NOT a FIRST
1024 				 */
1025 				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
1026 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1027 				    0, M_DONTWAIT, 1, MT_DATA);
1028 				if (oper) {
1029 					struct sctp_paramhdr *ph;
1030 					uint32_t *ippp;
1031 
1032 					SCTP_BUF_LEN(oper) =
1033 					    sizeof(struct sctp_paramhdr) +
1034 					    (3 * sizeof(uint32_t));
1035 					ph = mtod(oper, struct sctp_paramhdr *);
1036 					ph->param_type =
1037 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1038 					ph->param_length = htons(SCTP_BUF_LEN(oper));
1039 					ippp = (uint32_t *) (ph + 1);
1040 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
1041 					ippp++;
1042 					*ippp = chk->rec.data.TSN_seq;
1043 					ippp++;
1044 					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1045 				}
1046 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
1047 				sctp_abort_an_association(stcb->sctp_ep, stcb,
1048 				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1049 				*abort_flag = 1;
1050 			} else if (asoc->fragmented_delivery_inprogress) {
1051 				/*
1052 				 * Here we are ok with a MIDDLE or LAST
1053 				 * piece
1054 				 */
1055 				if (chk->rec.data.stream_number !=
1056 				    asoc->str_of_pdapi) {
1057 					/* Got to be the right STR No */
1058 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
1059 					    chk->rec.data.stream_number,
1060 					    asoc->str_of_pdapi);
1061 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1062 					    0, M_DONTWAIT, 1, MT_DATA);
1063 					if (oper) {
1064 						struct sctp_paramhdr *ph;
1065 						uint32_t *ippp;
1066 
1067 						SCTP_BUF_LEN(oper) =
1068 						    sizeof(struct sctp_paramhdr) +
1069 						    (sizeof(uint32_t) * 3);
1070 						ph = mtod(oper,
1071 						    struct sctp_paramhdr *);
1072 						ph->param_type =
1073 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1074 						ph->param_length =
1075 						    htons(SCTP_BUF_LEN(oper));
1076 						ippp = (uint32_t *) (ph + 1);
1077 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
1078 						ippp++;
1079 						*ippp = chk->rec.data.TSN_seq;
1080 						ippp++;
1081 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1082 					}
1083 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
1084 					sctp_abort_an_association(stcb->sctp_ep,
1085 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1086 					*abort_flag = 1;
1087 				} else if (((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
1088 				    SCTP_DATA_UNORDERED) &&
1089 				    (chk->rec.data.stream_seq !=
1090 				    asoc->ssn_of_pdapi)) {
1091 					/* Got to be the right STR Seq */
1092 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
1093 					    chk->rec.data.stream_seq,
1094 					    asoc->ssn_of_pdapi);
1095 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1096 					    0, M_DONTWAIT, 1, MT_DATA);
1097 					if (oper) {
1098 						struct sctp_paramhdr *ph;
1099 						uint32_t *ippp;
1100 
1101 						SCTP_BUF_LEN(oper) =
1102 						    sizeof(struct sctp_paramhdr) +
1103 						    (3 * sizeof(uint32_t));
1104 						ph = mtod(oper,
1105 						    struct sctp_paramhdr *);
1106 						ph->param_type =
1107 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1108 						ph->param_length =
1109 						    htons(SCTP_BUF_LEN(oper));
1110 						ippp = (uint32_t *) (ph + 1);
1111 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1112 						ippp++;
1113 						*ippp = chk->rec.data.TSN_seq;
1114 						ippp++;
1115 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1116 
1117 					}
1118 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
1119 					sctp_abort_an_association(stcb->sctp_ep,
1120 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1121 					*abort_flag = 1;
1122 				}
1123 			}
1124 		}
1125 		return;
1126 	}
1127 	/* Find its place */
1128 	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1129 		if (compare_with_wrap(at->rec.data.TSN_seq,
1130 		    chk->rec.data.TSN_seq, MAX_TSN)) {
1131 			/*
1132 			 * one in queue is bigger than the new one, insert
1133 			 * before this one
1134 			 */
1135 			/* A check */
1136 			asoc->size_on_reasm_queue += chk->send_size;
1137 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1138 			next = at;
1139 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1140 			break;
1141 		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
1142 			/* Gak, He sent me a duplicate str seq number */
1143 			/*
1144 			 * foo bar, I guess I will just free this new guy,
1145 			 * should we abort too? FIX ME MAYBE? Or it COULD be
1146 			 * that the SSN's have wrapped. Maybe I should
1147 			 * compare to TSN somehow... sigh for now just blow
1148 			 * away the chunk!
1149 			 */
1150 			if (chk->data) {
1151 				sctp_m_freem(chk->data);
1152 				chk->data = NULL;
1153 			}
1154 			sctp_free_a_chunk(stcb, chk);
1155 			return;
1156 		} else {
1157 			last_flags = at->rec.data.rcv_flags;
1158 			last_tsn = at->rec.data.TSN_seq;
1159 			prev = at;
1160 			if (TAILQ_NEXT(at, sctp_next) == NULL) {
1161 				/*
1162 				 * We are at the end, insert it after this
1163 				 * one
1164 				 */
1165 				/* check it first */
1166 				asoc->size_on_reasm_queue += chk->send_size;
1167 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1168 				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
1169 				break;
1170 			}
1171 		}
1172 	}
1173 	/* Now the audits */
1174 	if (prev) {
1175 		prev_tsn = chk->rec.data.TSN_seq - 1;
1176 		if (prev_tsn == prev->rec.data.TSN_seq) {
1177 			/*
1178 			 * Ok the one I am dropping onto the end is the
1179 			 * NEXT. A bit of valdiation here.
1180 			 * NEXT. A bit of validation here.
1181 			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1182 			    SCTP_DATA_FIRST_FRAG ||
1183 			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1184 			    SCTP_DATA_MIDDLE_FRAG) {
1185 				/*
1186 				 * Insert chk MUST be a MIDDLE or LAST
1187 				 * fragment
1188 				 */
1189 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1190 				    SCTP_DATA_FIRST_FRAG) {
1191 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
1192 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
1193 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1194 					    0, M_DONTWAIT, 1, MT_DATA);
1195 					if (oper) {
1196 						struct sctp_paramhdr *ph;
1197 						uint32_t *ippp;
1198 
1199 						SCTP_BUF_LEN(oper) =
1200 						    sizeof(struct sctp_paramhdr) +
1201 						    (3 * sizeof(uint32_t));
1202 						ph = mtod(oper,
1203 						    struct sctp_paramhdr *);
1204 						ph->param_type =
1205 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1206 						ph->param_length =
1207 						    htons(SCTP_BUF_LEN(oper));
1208 						ippp = (uint32_t *) (ph + 1);
1209 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1210 						ippp++;
1211 						*ippp = chk->rec.data.TSN_seq;
1212 						ippp++;
1213 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1214 
1215 					}
1216 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1217 					sctp_abort_an_association(stcb->sctp_ep,
1218 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1219 					*abort_flag = 1;
1220 					return;
1221 				}
1222 				if (chk->rec.data.stream_number !=
1223 				    prev->rec.data.stream_number) {
1224 					/*
1225 					 * Huh, need the correct STR here,
1226 					 * they must be the same.
1227 					 */
1228 					SCTP_PRINTF("Prev check - Gak, Evil plot, str:%d not the same as at:%d\n",
1229 					    chk->rec.data.stream_number,
1230 					    prev->rec.data.stream_number);
1231 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1232 					    0, M_DONTWAIT, 1, MT_DATA);
1233 					if (oper) {
1234 						struct sctp_paramhdr *ph;
1235 						uint32_t *ippp;
1236 
1237 						SCTP_BUF_LEN(oper) =
1238 						    sizeof(struct sctp_paramhdr) +
1239 						    (3 * sizeof(uint32_t));
1240 						ph = mtod(oper,
1241 						    struct sctp_paramhdr *);
1242 						ph->param_type =
1243 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1244 						ph->param_length =
1245 						    htons(SCTP_BUF_LEN(oper));
1246 						ippp = (uint32_t *) (ph + 1);
1247 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1248 						ippp++;
1249 						*ippp = chk->rec.data.TSN_seq;
1250 						ippp++;
1251 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1252 					}
1253 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1254 					sctp_abort_an_association(stcb->sctp_ep,
1255 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1256 
1257 					*abort_flag = 1;
1258 					return;
1259 				}
1260 				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1261 				    chk->rec.data.stream_seq !=
1262 				    prev->rec.data.stream_seq) {
1263 					/*
1264 					 * Huh, need the correct STR Seq here,
1265 					 * they must be the same.
1266 					 */
1267 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1268 					    chk->rec.data.stream_seq,
1269 					    prev->rec.data.stream_seq);
1270 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1271 					    0, M_DONTWAIT, 1, MT_DATA);
1272 					if (oper) {
1273 						struct sctp_paramhdr *ph;
1274 						uint32_t *ippp;
1275 
1276 						SCTP_BUF_LEN(oper) =
1277 						    sizeof(struct sctp_paramhdr) +
1278 						    (3 * sizeof(uint32_t));
1279 						ph = mtod(oper,
1280 						    struct sctp_paramhdr *);
1281 						ph->param_type =
1282 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1283 						ph->param_length =
1284 						    htons(SCTP_BUF_LEN(oper));
1285 						ippp = (uint32_t *) (ph + 1);
1286 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1287 						ippp++;
1288 						*ippp = chk->rec.data.TSN_seq;
1289 						ippp++;
1290 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1291 					}
1292 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1293 					sctp_abort_an_association(stcb->sctp_ep,
1294 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1295 
1296 					*abort_flag = 1;
1297 					return;
1298 				}
1299 			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1300 			    SCTP_DATA_LAST_FRAG) {
1301 				/* Insert chk MUST be a FIRST */
1302 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1303 				    SCTP_DATA_FIRST_FRAG) {
1304 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1305 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1306 					    0, M_DONTWAIT, 1, MT_DATA);
1307 					if (oper) {
1308 						struct sctp_paramhdr *ph;
1309 						uint32_t *ippp;
1310 
1311 						SCTP_BUF_LEN(oper) =
1312 						    sizeof(struct sctp_paramhdr) +
1313 						    (3 * sizeof(uint32_t));
1314 						ph = mtod(oper,
1315 						    struct sctp_paramhdr *);
1316 						ph->param_type =
1317 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1318 						ph->param_length =
1319 						    htons(SCTP_BUF_LEN(oper));
1320 						ippp = (uint32_t *) (ph + 1);
1321 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1322 						ippp++;
1323 						*ippp = chk->rec.data.TSN_seq;
1324 						ippp++;
1325 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1326 
1327 					}
1328 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1329 					sctp_abort_an_association(stcb->sctp_ep,
1330 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1331 
1332 					*abort_flag = 1;
1333 					return;
1334 				}
1335 			}
1336 		}
1337 	}
1338 	if (next) {
1339 		post_tsn = chk->rec.data.TSN_seq + 1;
1340 		if (post_tsn == next->rec.data.TSN_seq) {
1341 			/*
1342 			 * Ok the one I am inserting ahead of is my NEXT
1343 			 * one. A bit of validation here.
1344 			 */
1345 			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1346 				/* Insert chk MUST be a last fragment */
1347 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1348 				    != SCTP_DATA_LAST_FRAG) {
1349 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1350 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1351 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1352 					    0, M_DONTWAIT, 1, MT_DATA);
1353 					if (oper) {
1354 						struct sctp_paramhdr *ph;
1355 						uint32_t *ippp;
1356 
1357 						SCTP_BUF_LEN(oper) =
1358 						    sizeof(struct sctp_paramhdr) +
1359 						    (3 * sizeof(uint32_t));
1360 						ph = mtod(oper,
1361 						    struct sctp_paramhdr *);
1362 						ph->param_type =
1363 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1364 						ph->param_length =
1365 						    htons(SCTP_BUF_LEN(oper));
1366 						ippp = (uint32_t *) (ph + 1);
1367 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1368 						ippp++;
1369 						*ippp = chk->rec.data.TSN_seq;
1370 						ippp++;
1371 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1372 					}
1373 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1374 					sctp_abort_an_association(stcb->sctp_ep,
1375 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1376 
1377 					*abort_flag = 1;
1378 					return;
1379 				}
1380 			} else if (((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1381 			    SCTP_DATA_MIDDLE_FRAG) ||
1382 			    ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1383 			    SCTP_DATA_LAST_FRAG)) {
1384 				/*
1385 				 * Insert chk CAN be MIDDLE or FIRST NOT
1386 				 * LAST
1387 				 */
1388 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1389 				    SCTP_DATA_LAST_FRAG) {
1390 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1391 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1392 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1393 					    0, M_DONTWAIT, 1, MT_DATA);
1394 					if (oper) {
1395 						struct sctp_paramhdr *ph;
1396 						uint32_t *ippp;
1397 
1398 						SCTP_BUF_LEN(oper) =
1399 						    sizeof(struct sctp_paramhdr) +
1400 						    (3 * sizeof(uint32_t));
1401 						ph = mtod(oper,
1402 						    struct sctp_paramhdr *);
1403 						ph->param_type =
1404 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1405 						ph->param_length =
1406 						    htons(SCTP_BUF_LEN(oper));
1407 						ippp = (uint32_t *) (ph + 1);
1408 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1409 						ippp++;
1410 						*ippp = chk->rec.data.TSN_seq;
1411 						ippp++;
1412 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1413 
1414 					}
1415 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1416 					sctp_abort_an_association(stcb->sctp_ep,
1417 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1418 
1419 					*abort_flag = 1;
1420 					return;
1421 				}
1422 				if (chk->rec.data.stream_number !=
1423 				    next->rec.data.stream_number) {
1424 					/*
1425 					 * Huh, need the correct STR here,
1426 					 * they must be the same.
1427 					 */
1428 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, str:%d not the same as at:%d\n",
1429 					    chk->rec.data.stream_number,
1430 					    next->rec.data.stream_number);
1431 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1432 					    0, M_DONTWAIT, 1, MT_DATA);
1433 					if (oper) {
1434 						struct sctp_paramhdr *ph;
1435 						uint32_t *ippp;
1436 
1437 						SCTP_BUF_LEN(oper) =
1438 						    sizeof(struct sctp_paramhdr) +
1439 						    (3 * sizeof(uint32_t));
1440 						ph = mtod(oper,
1441 						    struct sctp_paramhdr *);
1442 						ph->param_type =
1443 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1444 						ph->param_length =
1445 						    htons(SCTP_BUF_LEN(oper));
1446 						ippp = (uint32_t *) (ph + 1);
1447 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1448 						ippp++;
1449 						*ippp = chk->rec.data.TSN_seq;
1450 						ippp++;
1451 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1452 
1453 					}
1454 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1455 					sctp_abort_an_association(stcb->sctp_ep,
1456 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1457 
1458 					*abort_flag = 1;
1459 					return;
1460 				}
1461 				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1462 				    chk->rec.data.stream_seq !=
1463 				    next->rec.data.stream_seq) {
1464 					/*
1465 					 * Huh, need the correct STR Seq here,
1466 					 * they must be the same.
1467 					 */
1468 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1469 					    chk->rec.data.stream_seq,
1470 					    next->rec.data.stream_seq);
1471 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1472 					    0, M_DONTWAIT, 1, MT_DATA);
1473 					if (oper) {
1474 						struct sctp_paramhdr *ph;
1475 						uint32_t *ippp;
1476 
1477 						SCTP_BUF_LEN(oper) =
1478 						    sizeof(struct sctp_paramhdr) +
1479 						    (3 * sizeof(uint32_t));
1480 						ph = mtod(oper,
1481 						    struct sctp_paramhdr *);
1482 						ph->param_type =
1483 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1484 						ph->param_length =
1485 						    htons(SCTP_BUF_LEN(oper));
1486 						ippp = (uint32_t *) (ph + 1);
1487 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1488 						ippp++;
1489 						*ippp = chk->rec.data.TSN_seq;
1490 						ippp++;
1491 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1492 					}
1493 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1494 					sctp_abort_an_association(stcb->sctp_ep,
1495 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1496 
1497 					*abort_flag = 1;
1498 					return;
1499 				}
1500 			}
1501 		}
1502 	}
1503 	/* Do we need to do some delivery? check */
1504 	sctp_deliver_reasm_check(stcb, asoc);
1505 }
1506 
1507 /*
1508  * This is an unfortunate routine. It checks to make sure an evil guy is not
1509  * stuffing us full of bad packet fragments. A broken peer could also do this,
1510  * but that is doubtful. It is too bad I must worry about evil crackers, sigh
1511  * :< more cycles.
1512  */
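/*
 * Example: with TSN 5 (FIRST) and TSN 6 (MIDDLE) queued, an arriving
 * self-contained chunk (FIRST|LAST) with TSN 7 would land next to a
 * non-LAST fragment, so this returns 1 and the caller treats it as a
 * protocol violation.
 */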
1513 static int
1514 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1515     uint32_t TSN_seq)
1516 {
1517 	struct sctp_tmit_chunk *at;
1518 	uint32_t tsn_est;
1519 
1520 	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1521 		if (compare_with_wrap(TSN_seq,
1522 		    at->rec.data.TSN_seq, MAX_TSN)) {
1523 			/* is it one bigger? */
1524 			tsn_est = at->rec.data.TSN_seq + 1;
1525 			if (tsn_est == TSN_seq) {
1526 				/* yep. It better be a last then */
1527 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1528 				    SCTP_DATA_LAST_FRAG) {
1529 					/*
1530 					 * Ok this guy belongs next to a guy
1531 					 * that is NOT last, it should be a
1532 					 * middle/last, not a complete
1533 					 * chunk.
1534 					 */
1535 					return (1);
1536 				} else {
1537 					/*
1538 					 * This guy is ok since its a LAST
1539 					 * and the new chunk is a fully
1540 					 * self- contained one.
1541 					 */
1542 					return (0);
1543 				}
1544 			}
1545 		} else if (TSN_seq == at->rec.data.TSN_seq) {
1546 			/* Software error since I have a dup? */
1547 			return (1);
1548 		} else {
1549 			/*
1550 			 * Ok, 'at' is larger than new chunk but does it
1551 			 * need to be right before it?
1552 			 */
1553 			tsn_est = TSN_seq + 1;
1554 			if (tsn_est == at->rec.data.TSN_seq) {
1555 				/* Yep, It better be a first */
1556 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1557 				    SCTP_DATA_FIRST_FRAG) {
1558 					return (1);
1559 				} else {
1560 					return (0);
1561 				}
1562 			}
1563 		}
1564 	}
1565 	return (0);
1566 }
1567 
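/*
 * Aside: the wraparound-aware comparisons above (compare_with_wrap with
 * MAX_TSN) implement serial-number arithmetic over the 32-bit TSN space.
 * A minimal stand-alone sketch of the idea (illustrative only, not the
 * kernel's implementation):
 *
 *	#include <stdint.h>
 *
 *	static int
 *	tsn_gt(uint32_t a, uint32_t b)
 *	{
 *		// "a > b" with wrap: a is ahead of b by less than half
 *		// the 32-bit space, so tsn_gt(1, 0xffffffff) is true.
 *		return ((a != b) && ((uint32_t)(a - b) < (1U << 31)));
 *	}
 *
 * With this property, "tsn_est = at->rec.data.TSN_seq + 1" remains a
 * valid adjacency test even when the TSN counter wraps past 0xffffffff.
 */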
1568 
1569 static int
1570 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1571     struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1572     struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1573     int *break_flag, int last_chunk)
1574 {
1575 	/* Process a data chunk */
1577 	struct sctp_tmit_chunk *chk;
1578 	uint32_t tsn, gap;
1579 
1580 	/* EY - for nr_sack */
1581 	uint32_t nr_gap;
1582 	struct mbuf *dmbuf;
1583 	int indx, the_len;
1584 	int need_reasm_check = 0;
1585 	uint16_t strmno, strmseq;
1586 	struct mbuf *oper;
1587 	struct sctp_queued_to_read *control;
1588 	int ordered;
1589 	uint32_t protocol_id;
1590 	uint8_t chunk_flags;
1591 	struct sctp_stream_reset_list *liste;
1592 
1593 	chk = NULL;
1594 	tsn = ntohl(ch->dp.tsn);
1595 	chunk_flags = ch->ch.chunk_flags;
1596 	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1597 		asoc->send_sack = 1;
1598 	}
1599 	protocol_id = ch->dp.protocol_id;
1600 	ordered = ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0);
1601 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1602 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1603 	}
1604 	if (stcb == NULL) {
1605 		return (0);
1606 	}
1607 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1608 	if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
1609 	    asoc->cumulative_tsn == tsn) {
1610 		/* It is a duplicate */
1611 		SCTP_STAT_INCR(sctps_recvdupdata);
1612 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1613 			/* Record a dup for the next outbound sack */
1614 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1615 			asoc->numduptsns++;
1616 		}
1617 		asoc->send_sack = 1;
1618 		return (0);
1619 	}
1620 	/* Calculate the number of TSNs between the base and this TSN */
1621 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1622 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1623 		/* Can't hold the bit in the mapping array at its max size, toss it */
1624 		return (0);
1625 	}
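	/*
	 * Worked example of the gap calculation (illustrative numbers):
	 * with mapping_array_base_tsn = 0xfffffffd and tsn = 5 the TSN
	 * space has wrapped, so gap = (MAX_TSN - 0xfffffffd) + 5 + 1 = 8;
	 * counting 0xfffffffd as gap 0: 0xffffffff is gap 2, 0 is gap 3,
	 * and 5 is gap 8. Without a wrap the gap is simply
	 * tsn - mapping_array_base_tsn.
	 */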
1626 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1627 		SCTP_TCB_LOCK_ASSERT(stcb);
1628 		if (sctp_expand_mapping_array(asoc, gap)) {
1629 			/* Can't expand, drop it */
1630 			return (0);
1631 		}
1632 	}
1633 	/* EY - for nr_sack */
1634 	nr_gap = gap;
1635 
1636 	if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
1637 		*high_tsn = tsn;
1638 	}
1639 	/* See if we have received this one already */
1640 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
1641 		SCTP_STAT_INCR(sctps_recvdupdata);
1642 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1643 			/* Record a dup for the next outbound sack */
1644 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1645 			asoc->numduptsns++;
1646 		}
1647 		asoc->send_sack = 1;
1648 		return (0);
1649 	}
1650 	/*
1651 	 * Check the GONE flag; duplicates would have caused a SACK to be
1652 	 * sent up above.
1653 	 */
1654 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1655 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1656 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1657 	    ) {
1658 		/*
1659 		 * wait a minute, this guy is gone, there is no longer a
1660 		 * receiver. Send peer an ABORT!
1661 		 */
1662 		struct mbuf *op_err;
1663 
1664 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1665 		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
1666 		*abort_flag = 1;
1667 		return (0);
1668 	}
1669 	/*
1670 	 * Now before going further we see if there is room. If NOT then we
1671 	 * MAY let one through only IF this TSN is the one we are waiting
1672 	 * for on a partial delivery API.
1673 	 */
1674 
1675 	/* now do the tests */
1676 	if (((asoc->cnt_on_all_streams +
1677 	    asoc->cnt_on_reasm_queue +
1678 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1679 	    (((int)asoc->my_rwnd) <= 0)) {
1680 		/*
1681 		 * When we have NO room in the rwnd we check to make sure
1682 		 * the reader is doing its job...
1683 		 */
1684 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1685 			/* some to read, wake-up */
1686 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1687 			struct socket *so;
1688 
1689 			so = SCTP_INP_SO(stcb->sctp_ep);
1690 			atomic_add_int(&stcb->asoc.refcnt, 1);
1691 			SCTP_TCB_UNLOCK(stcb);
1692 			SCTP_SOCKET_LOCK(so, 1);
1693 			SCTP_TCB_LOCK(stcb);
1694 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1695 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1696 				/* assoc was freed while we were unlocked */
1697 				SCTP_SOCKET_UNLOCK(so, 1);
1698 				return (0);
1699 			}
1700 #endif
1701 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1702 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1703 			SCTP_SOCKET_UNLOCK(so, 1);
1704 #endif
1705 		}
1706 		/* now is it in the mapping array of what we have accepted? */
1707 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1708 			/* Nope, not in the valid range; dump it */
1709 			sctp_set_rwnd(stcb, asoc);
1710 			if ((asoc->cnt_on_all_streams +
1711 			    asoc->cnt_on_reasm_queue +
1712 			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1713 				SCTP_STAT_INCR(sctps_datadropchklmt);
1714 			} else {
1715 				SCTP_STAT_INCR(sctps_datadroprwnd);
1716 			}
1717 			indx = *break_flag;
1718 			*break_flag = 1;
1719 			return (0);
1720 		}
1721 	}
1722 	strmno = ntohs(ch->dp.stream_id);
1723 	if (strmno >= asoc->streamincnt) {
1724 		struct sctp_paramhdr *phdr;
1725 		struct mbuf *mb;
1726 
1727 		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1728 		    0, M_DONTWAIT, 1, MT_DATA);
1729 		if (mb != NULL) {
1730 			/* add some space up front so prepend will work well */
1731 			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1732 			phdr = mtod(mb, struct sctp_paramhdr *);
1733 			/*
1734 			 * Error causes are just params, and this one has
1735 			 * two back-to-back phdrs: one with the error type
1736 			 * and size, the other with the stream id and a rsvd
1737 			 */
1738 			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1739 			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1740 			phdr->param_length =
1741 			    htons(sizeof(struct sctp_paramhdr) * 2);
1742 			phdr++;
1743 			/* We insert the stream in the type field */
1744 			phdr->param_type = ch->dp.stream_id;
1745 			/* And set the length to 0 for the rsvd field */
1746 			phdr->param_length = 0;
1747 			sctp_queue_op_err(stcb, mb);
1748 		}
1749 		SCTP_STAT_INCR(sctps_badsid);
1750 		SCTP_TCB_LOCK_ASSERT(stcb);
1751 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1752 		/* EY set this tsn present in nr_sack's nr_mapping_array */
1753 		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
1754 			SCTP_TCB_LOCK_ASSERT(stcb);
1755 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1756 			SCTP_REVERSE_OUT_TSN_PRES(gap, tsn, asoc);
1757 		}
1758 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1759 			/* we have a new high score */
1760 			asoc->highest_tsn_inside_map = tsn;
1761 			/* EY nr_sack version of the above */
1762 			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack)
1763 				asoc->highest_tsn_inside_nr_map = tsn;
1764 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1765 				sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
1766 			}
1767 		}
1768 		if (tsn == (asoc->cumulative_tsn + 1)) {
1769 			/* Update cum-ack */
1770 			asoc->cumulative_tsn = tsn;
1771 		}
1772 		return (0);
1773 	}
1774 	/*
1775 	 * Before we continue, let's validate that we are not being fooled by
1776 	 * an evil attacker. We can only have 4k chunks based on our TSN
1777 	 * spread allowed by the mapping array (512 * 8 bits), so there is no
1778 	 * way our stream sequence numbers could have wrapped. We of course
1779 	 * only validate the FIRST fragment, so the bit must be set.
1780 	 */
1781 	strmseq = ntohs(ch->dp.stream_sequence);
1782 #ifdef SCTP_ASOCLOG_OF_TSNS
1783 	SCTP_TCB_LOCK_ASSERT(stcb);
1784 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1785 		asoc->tsn_in_at = 0;
1786 		asoc->tsn_in_wrapped = 1;
1787 	}
1788 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1789 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1790 	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1791 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1792 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1793 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1794 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1795 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1796 	asoc->tsn_in_at++;
1797 #endif
1798 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1799 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1800 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1801 	    (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
1802 	    strmseq, MAX_SEQ) ||
1803 	    asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
1804 		/* The incoming sseq is behind where we last delivered? */
1805 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1806 		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1807 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1808 		    0, M_DONTWAIT, 1, MT_DATA);
1809 		if (oper) {
1810 			struct sctp_paramhdr *ph;
1811 			uint32_t *ippp;
1812 
1813 			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1814 			    (3 * sizeof(uint32_t));
1815 			ph = mtod(oper, struct sctp_paramhdr *);
1816 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1817 			ph->param_length = htons(SCTP_BUF_LEN(oper));
1818 			ippp = (uint32_t *) (ph + 1);
1819 			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1820 			ippp++;
1821 			*ippp = tsn;
1822 			ippp++;
1823 			*ippp = ((strmno << 16) | strmseq);
1824 
1825 		}
1826 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1827 		sctp_abort_an_association(stcb->sctp_ep, stcb,
1828 		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1829 		*abort_flag = 1;
1830 		return (0);
1831 	}
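	/*
	 * Aside on the cause blocks built here and below (a reading aid,
	 * not normative): each protocol-violation cause is a
	 * struct sctp_paramhdr followed by three 32-bit words - a
	 * location code (SCTP_FROM_SCTP_INDATA + SCTP_LOC_x), the
	 * offending TSN, and (stream number << 16) | stream sequence -
	 * so the peer's ABORT report pinpoints where and why we gave up.
	 */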
1832 	/************************************
1833 	 * From here down we may find ch-> invalid
1834 	 * so it's a good idea NOT to use it.
1835 	 *************************************/
1836 
1837 	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1838 	if (last_chunk == 0) {
1839 		dmbuf = SCTP_M_COPYM(*m,
1840 		    (offset + sizeof(struct sctp_data_chunk)),
1841 		    the_len, M_DONTWAIT);
1842 #ifdef SCTP_MBUF_LOGGING
1843 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1844 			struct mbuf *mat;
1845 
1846 			mat = dmbuf;
1847 			while (mat) {
1848 				if (SCTP_BUF_IS_EXTENDED(mat)) {
1849 					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1850 				}
1851 				mat = SCTP_BUF_NEXT(mat);
1852 			}
1853 		}
1854 #endif
1855 	} else {
1856 		/* We can steal the last chunk */
1857 		int l_len;
1858 
1859 		dmbuf = *m;
1860 		/* lop off the top part */
1861 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1862 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1863 			l_len = SCTP_BUF_LEN(dmbuf);
1864 		} else {
1865 			/*
1866 			 * need to count up the size; hopefully we do not
1867 			 * hit this too often :-0
1868 			 */
1869 			struct mbuf *lat;
1870 
1871 			l_len = 0;
1872 			lat = dmbuf;
1873 			while (lat) {
1874 				l_len += SCTP_BUF_LEN(lat);
1875 				lat = SCTP_BUF_NEXT(lat);
1876 			}
1877 		}
1878 		if (l_len > the_len) {
1879 			/* Trim the excess bytes off the end too */
1880 			m_adj(dmbuf, -(l_len - the_len));
1881 		}
1882 	}
1883 	if (dmbuf == NULL) {
1884 		SCTP_STAT_INCR(sctps_nomem);
1885 		return (0);
1886 	}
1887 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1888 	    asoc->fragmented_delivery_inprogress == 0 &&
1889 	    TAILQ_EMPTY(&asoc->resetHead) &&
1890 	    ((ordered == 0) ||
1891 	    ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1892 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1893 		/* Candidate for express delivery */
1894 		/*
1895 		 * It's not fragmented, no PD-API is up, nothing is in the
1896 		 * delivery queue, it's un-ordered OR ordered and the next to
1897 		 * deliver AND nothing else is stuck on the stream queue,
1898 		 * and there is room for it in the socket buffer. Let's just
1899 		 * stuff it up the buffer....
1900 		 */
1901 
1902 		/* It would be nice to avoid this copy if we could :< */
1903 		sctp_alloc_a_readq(stcb, control);
1904 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1905 		    protocol_id,
1906 		    stcb->asoc.context,
1907 		    strmno, strmseq,
1908 		    chunk_flags,
1909 		    dmbuf);
1910 		if (control == NULL) {
1911 			goto failed_express_del;
1912 		}
1913 		sctp_add_to_readq(stcb->sctp_ep, stcb, control, &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
1914 
1915 		/*
1916 		 * EY: if this delivered tsn is out of order, the nr_map
1917 		 * must be updated here.
1918 		 */
1919 		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
1920 			/*
1921 			 * EY: the mapping_array and nr_mapping_array base
1922 			 * TSNs are expected to be equal here.
1923 			 */
1950 			/* EY - not 100% sure about the lock thing */
1951 			SCTP_TCB_LOCK_ASSERT(stcb);
1952 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
1953 			SCTP_REVERSE_OUT_TSN_PRES(nr_gap, tsn, asoc);
1954 			if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
1955 				asoc->highest_tsn_inside_nr_map = tsn;
1956 		}
1957 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1958 			/* for ordered, bump what we delivered */
1959 			asoc->strmin[strmno].last_sequence_delivered++;
1960 		}
1961 		SCTP_STAT_INCR(sctps_recvexpress);
1962 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1963 			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1964 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1965 		}
1966 		control = NULL;
1967 		goto finish_express_del;
1968 	}
1969 failed_express_del:
1970 	/* If we reach here this is a new chunk */
1971 	chk = NULL;
1972 	control = NULL;
1973 	/* Express for fragmented delivery? */
1974 	if ((asoc->fragmented_delivery_inprogress) &&
1975 	    (stcb->asoc.control_pdapi) &&
1976 	    (asoc->str_of_pdapi == strmno) &&
1977 	    (asoc->ssn_of_pdapi == strmseq)
1978 	    ) {
1979 		control = stcb->asoc.control_pdapi;
1980 		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1981 			/* Can't be another first? */
1982 			goto failed_pdapi_express_del;
1983 		}
1984 		if (tsn == (control->sinfo_tsn + 1)) {
1985 			/* Yep, we can add it on */
1986 			int end = 0;
1987 			uint32_t cumack;
1988 
1989 			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1990 				end = 1;
1991 			}
1992 			cumack = asoc->cumulative_tsn;
1993 			if ((cumack + 1) == tsn)
1994 				cumack = tsn;
1995 
1996 			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1997 			    tsn,
1998 			    &stcb->sctp_socket->so_rcv)) {
1999 				SCTP_PRINTF("Append fails end:%d\n", end);
2000 				goto failed_pdapi_express_del;
2001 			}
2002 			/*
2003 			 * EY: it was appended to the read queue in the
2004 			 * previous block; if this delivered tsn is out of
2005 			 * order, the nr_map must be updated here.
2006 			 */
2007 			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2028 				/* EY - not 100% sure about the lock thing */
2029 				SCTP_TCB_LOCK_ASSERT(stcb);
2030 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
2031 				SCTP_REVERSE_OUT_TSN_PRES(nr_gap, tsn, asoc);
2032 				if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
2033 					asoc->highest_tsn_inside_nr_map = tsn;
2034 			}
2035 			SCTP_STAT_INCR(sctps_recvexpressm);
2036 			control->sinfo_tsn = tsn;
2037 			asoc->tsn_last_delivered = tsn;
2038 			asoc->fragment_flags = chunk_flags;
2039 			asoc->tsn_of_pdapi_last_delivered = tsn;
2040 			asoc->last_flags_delivered = chunk_flags;
2041 			asoc->last_strm_seq_delivered = strmseq;
2042 			asoc->last_strm_no_delivered = strmno;
2043 			if (end) {
2044 				/* clean up the flags and such */
2045 				asoc->fragmented_delivery_inprogress = 0;
2046 				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
2047 					asoc->strmin[strmno].last_sequence_delivered++;
2048 				}
2049 				stcb->asoc.control_pdapi = NULL;
2050 				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
2051 					/*
2052 					 * There could be another message
2053 					 * ready
2054 					 */
2055 					need_reasm_check = 1;
2056 				}
2057 			}
2058 			control = NULL;
2059 			goto finish_express_del;
2060 		}
2061 	}
2062 failed_pdapi_express_del:
2063 	control = NULL;
2064 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2065 		sctp_alloc_a_chunk(stcb, chk);
2066 		if (chk == NULL) {
2067 			/* No memory so we drop the chunk */
2068 			SCTP_STAT_INCR(sctps_nomem);
2069 			if (last_chunk == 0) {
2070 				/* we copied it, free the copy */
2071 				sctp_m_freem(dmbuf);
2072 			}
2073 			return (0);
2074 		}
2075 		chk->rec.data.TSN_seq = tsn;
2076 		chk->no_fr_allowed = 0;
2077 		chk->rec.data.stream_seq = strmseq;
2078 		chk->rec.data.stream_number = strmno;
2079 		chk->rec.data.payloadtype = protocol_id;
2080 		chk->rec.data.context = stcb->asoc.context;
2081 		chk->rec.data.doing_fast_retransmit = 0;
2082 		chk->rec.data.rcv_flags = chunk_flags;
2083 		chk->asoc = asoc;
2084 		chk->send_size = the_len;
2085 		chk->whoTo = net;
2086 		atomic_add_int(&net->ref_count, 1);
2087 		chk->data = dmbuf;
2088 	} else {
2089 		sctp_alloc_a_readq(stcb, control);
2090 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2091 		    protocol_id,
2092 		    stcb->asoc.context,
2093 		    strmno, strmseq,
2094 		    chunk_flags,
2095 		    dmbuf);
2096 		if (control == NULL) {
2097 			/* No memory so we drop the chunk */
2098 			SCTP_STAT_INCR(sctps_nomem);
2099 			if (last_chunk == 0) {
2100 				/* we copied it, free the copy */
2101 				sctp_m_freem(dmbuf);
2102 			}
2103 			return (0);
2104 		}
2105 		control->length = the_len;
2106 	}
2107 
2108 	/* Mark it as received */
2109 	/* Now queue it where it belongs */
2110 	if (control != NULL) {
2111 		/* First a sanity check */
2112 		if (asoc->fragmented_delivery_inprogress) {
2113 			/*
2114 			 * Ok, we have a fragmented delivery in progress; if
2115 			 * this complete chunk is next to deliver OR belongs,
2116 			 * in our view, in the reassembly queue, the peer is
2117 			 * evil or broken.
2118 			 */
2119 			uint32_t estimate_tsn;
2120 
2121 			estimate_tsn = asoc->tsn_last_delivered + 1;
2122 			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
2123 			    (estimate_tsn == control->sinfo_tsn)) {
2124 				/* Evil/Broken peer */
2125 				sctp_m_freem(control->data);
2126 				control->data = NULL;
2127 				if (control->whoFrom) {
2128 					sctp_free_remote_addr(control->whoFrom);
2129 					control->whoFrom = NULL;
2130 				}
2131 				sctp_free_a_readq(stcb, control);
2132 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2133 				    0, M_DONTWAIT, 1, MT_DATA);
2134 				if (oper) {
2135 					struct sctp_paramhdr *ph;
2136 					uint32_t *ippp;
2137 
2138 					SCTP_BUF_LEN(oper) =
2139 					    sizeof(struct sctp_paramhdr) +
2140 					    (3 * sizeof(uint32_t));
2141 					ph = mtod(oper, struct sctp_paramhdr *);
2142 					ph->param_type =
2143 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2144 					ph->param_length = htons(SCTP_BUF_LEN(oper));
2145 					ippp = (uint32_t *) (ph + 1);
2146 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
2147 					ippp++;
2148 					*ippp = tsn;
2149 					ippp++;
2150 					*ippp = ((strmno << 16) | strmseq);
2151 				}
2152 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
2153 				sctp_abort_an_association(stcb->sctp_ep, stcb,
2154 				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2155 
2156 				*abort_flag = 1;
2157 				return (0);
2158 			} else {
2159 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2160 					sctp_m_freem(control->data);
2161 					control->data = NULL;
2162 					if (control->whoFrom) {
2163 						sctp_free_remote_addr(control->whoFrom);
2164 						control->whoFrom = NULL;
2165 					}
2166 					sctp_free_a_readq(stcb, control);
2167 
2168 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2169 					    0, M_DONTWAIT, 1, MT_DATA);
2170 					if (oper) {
2171 						struct sctp_paramhdr *ph;
2172 						uint32_t *ippp;
2173 
2174 						SCTP_BUF_LEN(oper) =
2175 						    sizeof(struct sctp_paramhdr) +
2176 						    (3 * sizeof(uint32_t));
2177 						ph = mtod(oper,
2178 						    struct sctp_paramhdr *);
2179 						ph->param_type =
2180 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2181 						ph->param_length =
2182 						    htons(SCTP_BUF_LEN(oper));
2183 						ippp = (uint32_t *) (ph + 1);
2184 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
2185 						ippp++;
2186 						*ippp = tsn;
2187 						ippp++;
2188 						*ippp = ((strmno << 16) | strmseq);
2189 					}
2190 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2191 					sctp_abort_an_association(stcb->sctp_ep,
2192 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2193 
2194 					*abort_flag = 1;
2195 					return (0);
2196 				}
2197 			}
2198 		} else {
2199 			/* No PDAPI running */
2200 			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
2201 				/*
2202 				 * Reassembly queue is NOT empty; validate
2203 				 * that this tsn does not need to be in the
2204 				 * reassembly queue. If it does then our peer
2205 				 * is broken or evil.
2206 				 */
2207 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2208 					sctp_m_freem(control->data);
2209 					control->data = NULL;
2210 					if (control->whoFrom) {
2211 						sctp_free_remote_addr(control->whoFrom);
2212 						control->whoFrom = NULL;
2213 					}
2214 					sctp_free_a_readq(stcb, control);
2215 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2216 					    0, M_DONTWAIT, 1, MT_DATA);
2217 					if (oper) {
2218 						struct sctp_paramhdr *ph;
2219 						uint32_t *ippp;
2220 
2221 						SCTP_BUF_LEN(oper) =
2222 						    sizeof(struct sctp_paramhdr) +
2223 						    (3 * sizeof(uint32_t));
2224 						ph = mtod(oper,
2225 						    struct sctp_paramhdr *);
2226 						ph->param_type =
2227 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2228 						ph->param_length =
2229 						    htons(SCTP_BUF_LEN(oper));
2230 						ippp = (uint32_t *) (ph + 1);
2231 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2232 						ippp++;
2233 						*ippp = tsn;
2234 						ippp++;
2235 						*ippp = ((strmno << 16) | strmseq);
2236 					}
2237 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2238 					sctp_abort_an_association(stcb->sctp_ep,
2239 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2240 
2241 					*abort_flag = 1;
2242 					return (0);
2243 				}
2244 			}
2245 		}
2246 		/* ok, if we reach here we have passed the sanity checks */
2247 		if (chunk_flags & SCTP_DATA_UNORDERED) {
2248 			/* queue directly into socket buffer */
2249 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2250 			    control,
2251 			    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
2252 
2253 			/*
2254 			 * EY: it was added to the read queue in the previous
2255 			 * block; if this delivered tsn is out of order, the
2256 			 * nr_map must be updated here.
2257 			 */
2258 			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2259 				/*
2260 				 * EY: the mapping_array and nr_mapping_array
2261 				 * base TSNs are expected to be equal here.
2262 				 */
2296 				SCTP_TCB_LOCK_ASSERT(stcb);
2297 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
2298 				SCTP_REVERSE_OUT_TSN_PRES(nr_gap, tsn, asoc);
2299 				if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
2300 					asoc->highest_tsn_inside_nr_map = tsn;
2301 			}
2302 		} else {
2303 			/*
2304 			 * Special check for when streams are resetting. We
2305 			 * could be smarter about this and check the actual
2306 			 * stream to see if it is not being reset; that way
2307 			 * we would not create a HOLB when amongst streams
2308 			 * being reset and those not being reset.
2309 			 *
2310 			 * We take complete messages that have a stream reset
2311 			 * intervening (aka the TSN is after where our
2312 			 * cum-ack needs to be) off and put them on a
2313 			 * pending_reply_queue. The reassembly ones we do
2314 			 * not have to worry about since they are all sorted
2315 			 * and processed by TSN order. It is only the
2316 			 * singletons I must worry about.
2317 			 */
2318 			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2319 			    ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)))
2320 			    ) {
2321 				/*
2322 				 * yep, it's past where we need to reset... go
2323 				 * ahead and queue it.
2324 				 */
2325 				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2326 					/* first one on */
2327 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2328 				} else {
2329 					struct sctp_queued_to_read *ctlOn;
2330 					unsigned char inserted = 0;
2331 
2332 					ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
2333 					while (ctlOn) {
2334 						if (compare_with_wrap(control->sinfo_tsn,
2335 						    ctlOn->sinfo_tsn, MAX_TSN)) {
2336 							ctlOn = TAILQ_NEXT(ctlOn, next);
2337 						} else {
2338 							/* found it */
2339 							TAILQ_INSERT_BEFORE(ctlOn, control, next);
2340 							inserted = 1;
2341 							break;
2342 						}
2343 					}
2344 					if (inserted == 0) {
2345 						/*
2346 						 * not inserted before any
2347 						 * existing entry, so it must
2348 						 * go at the end.
2349 						 */
2350 						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2351 					}
2352 				}
2353 			} else {
2354 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2355 				if (*abort_flag) {
2356 					return (0);
2357 				}
2358 			}
2359 		}
2360 	} else {
2361 		/* Into the re-assembly queue */
2362 		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2363 		if (*abort_flag) {
2364 			/*
2365 			 * the assoc is now gone and chk was put onto the
2366 			 * reasm queue, which has all been freed.
2367 			 */
2368 			*m = NULL;
2369 			return (0);
2370 		}
2371 	}
2372 finish_express_del:
2373 	if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
2374 		/* we have a new high score */
2375 		asoc->highest_tsn_inside_map = tsn;
2376 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2377 			sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2378 		}
2379 	}
2380 	if (tsn == (asoc->cumulative_tsn + 1)) {
2381 		/* Update cum-ack */
2382 		asoc->cumulative_tsn = tsn;
2383 	}
2384 	if (last_chunk) {
2385 		*m = NULL;
2386 	}
2387 	if (ordered) {
2388 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2389 	} else {
2390 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2391 	}
2392 	SCTP_STAT_INCR(sctps_recvdata);
2393 	/* Set it present please */
2394 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2395 		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2396 	}
2397 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2398 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2399 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2400 	}
2401 	SCTP_TCB_LOCK_ASSERT(stcb);
2402 	SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2403 
2404 	if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
2405 	    asoc->peer_supports_nr_sack &&
2406 	    (SCTP_BASE_SYSCTL(sctp_do_drain) == 0)) {
2407 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2408 		SCTP_REVERSE_OUT_TSN_PRES(nr_gap, tsn, asoc);
2409 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
2410 			asoc->highest_tsn_inside_nr_map = tsn;
2411 		}
2412 	}
2413 	/* check the special flag for stream resets */
2414 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2415 	    ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
2416 	    (asoc->cumulative_tsn == liste->tsn))
2417 	    ) {
2418 		/*
2419 		 * we have finished working through the backlogged TSNs; now
2420 		 * time to reset streams. 1: call reset function. 2: free
2421 		 * pending_reply space. 3: distribute any chunks in the
2422 		 * pending_reply_queue.
2423 		 */
2424 		struct sctp_queued_to_read *ctl;
2425 
2426 		sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2427 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2428 		SCTP_FREE(liste, SCTP_M_STRESET);
2429 		/* sa_ignore FREED_MEMORY */
2430 		liste = TAILQ_FIRST(&asoc->resetHead);
2431 		ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2432 		if (ctl && (liste == NULL)) {
2433 			/* All can be removed */
2434 			while (ctl) {
2435 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2436 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2437 				if (*abort_flag) {
2438 					return (0);
2439 				}
2440 				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2441 			}
2442 		} else if (ctl) {
2443 			/* more than one in queue */
2444 			while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
2445 				/*
2446 				 * if ctl->sinfo_tsn is <= liste->tsn we can
2447 				 * process it which is the NOT of
2448 				 * ctl->sinfo_tsn > liste->tsn
2449 				 */
2450 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2451 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2452 				if (*abort_flag) {
2453 					return (0);
2454 				}
2455 				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2456 			}
2457 		}
2458 		/*
2459 		 * Now service re-assembly to pick up anything that has been
2460 		 * held on the reassembly queue.
2461 		 */
2462 		sctp_deliver_reasm_check(stcb, asoc);
2463 		need_reasm_check = 0;
2464 	}
2465 	if (need_reasm_check) {
2466 		/* Another one waits? */
2467 		sctp_deliver_reasm_check(stcb, asoc);
2468 	}
2469 	return (1);
2470 }
2471 
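/*
 * Return-value contract of sctp_process_a_data_chunk(), as used by
 * sctp_process_data() below: 1 means the chunk was accepted and counted,
 * 0 means it was a duplicate or was dropped. Independently, *abort_flag
 * is set when the association was aborted and *break_flag when rwnd or
 * queue limits force the caller to stop processing the rest of the packet.
 */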
2472 int8_t sctp_map_lookup_tab[256] = {
2473 	-1, 0, -1, 1, -1, 0, -1, 2,
2474 	-1, 0, -1, 1, -1, 0, -1, 3,
2475 	-1, 0, -1, 1, -1, 0, -1, 2,
2476 	-1, 0, -1, 1, -1, 0, -1, 4,
2477 	-1, 0, -1, 1, -1, 0, -1, 2,
2478 	-1, 0, -1, 1, -1, 0, -1, 3,
2479 	-1, 0, -1, 1, -1, 0, -1, 2,
2480 	-1, 0, -1, 1, -1, 0, -1, 5,
2481 	-1, 0, -1, 1, -1, 0, -1, 2,
2482 	-1, 0, -1, 1, -1, 0, -1, 3,
2483 	-1, 0, -1, 1, -1, 0, -1, 2,
2484 	-1, 0, -1, 1, -1, 0, -1, 4,
2485 	-1, 0, -1, 1, -1, 0, -1, 2,
2486 	-1, 0, -1, 1, -1, 0, -1, 3,
2487 	-1, 0, -1, 1, -1, 0, -1, 2,
2488 	-1, 0, -1, 1, -1, 0, -1, 6,
2489 	-1, 0, -1, 1, -1, 0, -1, 2,
2490 	-1, 0, -1, 1, -1, 0, -1, 3,
2491 	-1, 0, -1, 1, -1, 0, -1, 2,
2492 	-1, 0, -1, 1, -1, 0, -1, 4,
2493 	-1, 0, -1, 1, -1, 0, -1, 2,
2494 	-1, 0, -1, 1, -1, 0, -1, 3,
2495 	-1, 0, -1, 1, -1, 0, -1, 2,
2496 	-1, 0, -1, 1, -1, 0, -1, 5,
2497 	-1, 0, -1, 1, -1, 0, -1, 2,
2498 	-1, 0, -1, 1, -1, 0, -1, 3,
2499 	-1, 0, -1, 1, -1, 0, -1, 2,
2500 	-1, 0, -1, 1, -1, 0, -1, 4,
2501 	-1, 0, -1, 1, -1, 0, -1, 2,
2502 	-1, 0, -1, 1, -1, 0, -1, 3,
2503 	-1, 0, -1, 1, -1, 0, -1, 2,
2504 	-1, 0, -1, 1, -1, 0, -1, 7,
2505 };
2506 
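/*
 * Note on sctp_map_lookup_tab: for a byte b, the entry is the zero-based
 * index of the last bit in the run of consecutive 1-bits starting at bit
 * 0, or -1 when bit 0 is clear (table[0x01] == 0, table[0x03] == 1,
 * table[0xff] == 7). sctp_sack_check() uses it to land "at" on the last
 * contiguously-received TSN within a partially filled byte. A sketch
 * that would generate the same 256 entries (illustrative only):
 *
 *	int8_t table[256];
 *	unsigned int b, bit;
 *
 *	for (b = 0; b < 256; b++) {
 *		table[b] = -1;
 *		for (bit = 0; bit < 8; bit++) {
 *			if ((b & (1U << bit)) == 0)
 *				break;		// first 0 bit ends the run
 *			table[b] = (int8_t)bit;	// still in the initial run
 *		}
 *	}
 */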
2507 
2508 void
2509 sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag)
2510 {
2511 	/*
2512 	 * Now we also need to check the mapping array in a couple of ways.
2513 	 * 1) Did we move the cum-ack point? 2) Can we slide the array down?
2514 	 */
2515 	struct sctp_association *asoc;
2516 	int at;
2517 	int last_all_ones = 0;
2518 	int slide_from, slide_end, lgap, distance;
2519 
2525 	uint32_t old_cumack, old_base, old_highest;
2526 	unsigned char aux_array[64];
2527 
2528 	/*
2529 	 * EY! Don't think this is required, but I am imitating the code for
2530 	 * the map just to make sure.
2531 	 */
2532 	unsigned char nr_aux_array[64];
2533 
2534 	asoc = &stcb->asoc;
2535 	at = 0;
2536 
2537 	old_cumack = asoc->cumulative_tsn;
2538 	old_base = asoc->mapping_array_base_tsn;
2539 	old_highest = asoc->highest_tsn_inside_map;
2540 	if (asoc->mapping_array_size < 64)
2541 		memcpy(aux_array, asoc->mapping_array,
2542 		    asoc->mapping_array_size);
2543 	else
2544 		memcpy(aux_array, asoc->mapping_array, 64);
2545 	/* EY do the same for nr_mapping_array */
2546 	if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2547 		/*
2548 		 * The size and base tsn of the map and nr_map are expected
2549 		 * to be consistent here.
2550 		 */
2551 		/* EY! just imitating the above code */
2552 		if (asoc->nr_mapping_array_size < 64)
2553 			memcpy(nr_aux_array, asoc->nr_mapping_array,
2554 			    asoc->nr_mapping_array_size);
2555 		else
2556 			memcpy(nr_aux_array, asoc->nr_mapping_array, 64);
2567 	}
2568 	/*
2569 	 * We could probably improve this a small bit by calculating the
2570 	 * offset of the current cum-ack as the starting point.
2571 	 */
2572 	at = 0;
2573 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2574 
2575 		if (asoc->mapping_array[slide_from] == 0xff) {
2576 			at += 8;
2577 			last_all_ones = 1;
2578 		} else {
2579 			/* there is a 0 bit */
2580 			at += sctp_map_lookup_tab[asoc->mapping_array[slide_from]];
2581 			last_all_ones = 0;
2582 			break;
2583 		}
2584 	}
2585 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - last_all_ones);
2586 	/* at is one off, since in the table an embedded -1 is present */
2587 	at++;
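	/*
	 * Worked example (illustrative numbers): with base tsn 1000 and
	 * mapping_array = { 0xff, 0xff, 0x05, ... }, the scan above adds
	 * 8 + 8 for the two all-ones bytes, then table[0x05] = 0 for the
	 * third, so cumulative_tsn = 1000 + 16 - 0 = 1016 (TSN 1016 is
	 * present, 1017 is the first hole) and at becomes 17.
	 */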
2588 
2589 	if (compare_with_wrap(asoc->cumulative_tsn,
2590 	    asoc->highest_tsn_inside_map,
2591 	    MAX_TSN)) {
2592 #ifdef INVARIANTS
2593 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2594 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2595 #else
2596 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2597 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2598 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2599 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2600 		}
2601 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2602 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2603 #endif
2604 	}
2605 	if ((asoc->cumulative_tsn == asoc->highest_tsn_inside_map) && (at >= 8)) {
2606 		/* The complete array was completed by a single FR */
2607 		/* highest becomes the cum-ack */
2608 		int clr;
2609 
2610 		asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
2611 		/* clear the array */
2612 		clr = (at >> 3) + 1;
2613 		if (clr > asoc->mapping_array_size) {
2614 			clr = asoc->mapping_array_size;
2615 		}
2616 		memset(asoc->mapping_array, 0, clr);
2617 		/* base becomes one ahead of the cum-ack */
2618 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2619 
2620 		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2621 
2622 			if (clr > asoc->nr_mapping_array_size)
2623 				clr = asoc->nr_mapping_array_size;
2624 
2625 			memset(asoc->nr_mapping_array, 0, clr);
2626 			/* base becomes one ahead of the cum-ack */
2627 			asoc->nr_mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2628 			asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2629 		}
2630 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2631 			sctp_log_map(old_base, old_cumack, old_highest,
2632 			    SCTP_MAP_PREPARE_SLIDE);
2633 			sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2634 			    asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED);
2635 		}
2636 	} else if (at >= 8) {
2637 		/* we can slide the mapping array down */
2638 		/* slide_from holds where we hit the first NON 0xff byte */
2639 
2640 		/*
2641 		 * now calculate the ceiling of the move using our highest
2642 		 * TSN value
2643 		 */
2644 		if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
2645 			lgap = asoc->highest_tsn_inside_map -
2646 			    asoc->mapping_array_base_tsn;
2647 		} else {
2648 			lgap = (MAX_TSN - asoc->mapping_array_base_tsn) +
2649 			    asoc->highest_tsn_inside_map + 1;
2650 		}
2651 		slide_end = lgap >> 3;
2652 		if (slide_end < slide_from) {
2653 #ifdef INVARIANTS
2654 			panic("impossible slide");
2655 #else
2656 			printf("impossible slide?\n");
2657 			return;
2658 #endif
2659 		}
2660 		if (slide_end > asoc->mapping_array_size) {
2661 #ifdef INVARIANTS
2662 			panic("would overrun buffer");
2663 #else
2664 			printf("Gak, would have overrun map end:%d slide_end:%d\n",
2665 			    asoc->mapping_array_size, slide_end);
2666 			slide_end = asoc->mapping_array_size;
2667 #endif
2668 		}
2669 		distance = (slide_end - slide_from) + 1;
2670 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2671 			sctp_log_map(old_base, old_cumack, old_highest,
2672 			    SCTP_MAP_PREPARE_SLIDE);
2673 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2674 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2675 		}
2676 		if (distance + slide_from > asoc->mapping_array_size ||
2677 		    distance < 0) {
2678 			/*
2679 			 * Here we do NOT slide forward the array so that
2680 			 * hopefully when more data comes in to fill it up
2681 			 * we will be able to slide it forward. Really I
2682 			 * don't think this should happen :-0
2683 			 */
2684 
2685 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2686 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2687 				    (uint32_t) asoc->mapping_array_size,
2688 				    SCTP_MAP_SLIDE_NONE);
2689 			}
2690 		} else {
2691 			int ii;
2692 
2693 			for (ii = 0; ii < distance; ii++) {
2694 				asoc->mapping_array[ii] =
2695 				    asoc->mapping_array[slide_from + ii];
2696 			}
2697 			for (ii = distance; ii <= slide_end; ii++) {
2698 				asoc->mapping_array[ii] = 0;
2699 			}
2700 			asoc->mapping_array_base_tsn += (slide_from << 3);
2701 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2702 				sctp_log_map(asoc->mapping_array_base_tsn,
2703 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2704 				    SCTP_MAP_SLIDE_RESULT);
2705 			}
2706 			/*
2707 			 * EY: if doing nr_sacks, then slide the
2708 			 * nr_mapping_array accordingly as well.
2709 			 */
2710 			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2711 				for (ii = 0; ii < distance; ii++) {
2712 					asoc->nr_mapping_array[ii] =
2713 					    asoc->nr_mapping_array[slide_from + ii];
2714 				}
2715 				for (ii = distance; ii <= slide_end; ii++) {
2716 					asoc->nr_mapping_array[ii] = 0;
2717 				}
2718 				asoc->nr_mapping_array_base_tsn += (slide_from << 3);
2719 			}
2720 		}
2721 	}
2722 	/*
2723 	 * Now we need to see if we need to queue a sack or just start the
2724 	 * timer (if allowed).
2725 	 */
2726 	if (ok_to_sack) {
2727 		if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2728 			/*
2729 			 * Ok, special case: in the SHUTDOWN-SENT case we
2730 			 * make sure the SACK timer is off and instead send a
2731 			 * SHUTDOWN and a SACK.
2732 			 */
2733 			if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2734 				sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2735 				    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2736 			}
2737 			sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2738 			/*
2739 			 * EY: if nr_sacks are in use, send an nr-sack;
2740 			 * otherwise a sack.
2741 			 */
2742 			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
2743 				sctp_send_nr_sack(stcb);
2744 			else
2745 				sctp_send_sack(stcb);
2746 		} else {
2747 			int is_a_gap;
2748 
2749 			/* is there a gap now? */
2750 			is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2751 			    stcb->asoc.cumulative_tsn, MAX_TSN);
2752 
2753 			/*
2754 			 * CMT DAC algorithm: increase number of packets
2755 			 * received since last ack
2756 			 */
2757 			stcb->asoc.cmt_dac_pkts_rcvd++;
2758 
2759 			if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2760 								 * SACK */
2761 			    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2762 								 * longer is one */
2763 			    (stcb->asoc.numduptsns) ||	/* we have dup's */
2764 			    (is_a_gap) ||	/* is still a gap */
2765 			    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2766 			    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2767 			    ) {
2768 
2769 				if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off)) &&
2770 				    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2771 				    (stcb->asoc.send_sack == 0) &&
2772 				    (stcb->asoc.numduptsns == 0) &&
2773 				    (stcb->asoc.delayed_ack) &&
2774 				    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2775 
2776 					/*
2777 					 * CMT DAC algorithm: With CMT,
2778 					 * delay acks even in the face of
2779 					 * reordering. Therefore, acks that
2780 					 * do not have to be sent because of
2781 					 * the above reasons will be
2782 					 * delayed. That is, acks that would
2783 					 * have been sent due to gap reports
2784 					 * will be delayed with DAC. Start
2785 					 * the delayed ack timer.
2787 					 */
2788 					sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2789 					    stcb->sctp_ep, stcb, NULL);
2790 				} else {
2791 					/*
2792 					 * Ok we must build a SACK since the
2793 					 * timer is pending, we got our
2794 					 * first packet OR there are gaps or
2795 					 * duplicates.
2796 					 */
2797 					(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2798 					/*
2799 					 * EY: if nr_sacks are in use, send
2800 					 * an nr-sack; otherwise a sack.
2801 					 */
2802 					if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
2803 						sctp_send_nr_sack(stcb);
2804 					else
2805 						sctp_send_sack(stcb);
2806 				}
2807 			} else {
2808 				if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2809 					sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2810 					    stcb->sctp_ep, stcb, NULL);
2811 				}
2812 			}
2813 		}
2814 	}
2815 }
2816 
2817 void
2818 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2819 {
2820 	struct sctp_tmit_chunk *chk;
2821 	uint32_t tsize;
2822 	uint16_t nxt_todel;
2823 
2824 	if (asoc->fragmented_delivery_inprogress) {
2825 		sctp_service_reassembly(stcb, asoc);
2826 	}
2827 	/* Can we proceed further, i.e. is the PD-API complete? */
2828 	if (asoc->fragmented_delivery_inprogress) {
2829 		/* no */
2830 		return;
2831 	}
2832 	/*
2833 	 * Now is there some other chunk I can deliver from the reassembly
2834 	 * queue?
2835 	 */
2836 doit_again:
2837 	chk = TAILQ_FIRST(&asoc->reasmqueue);
2838 	if (chk == NULL) {
2839 		asoc->size_on_reasm_queue = 0;
2840 		asoc->cnt_on_reasm_queue = 0;
2841 		return;
2842 	}
2843 	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2844 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2845 	    ((nxt_todel == chk->rec.data.stream_seq) ||
2846 	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2847 		/*
2848 		 * Yep, the first one is here. We set up to start reception
2849 		 * by backing down the TSN, just in case we can't deliver.
2850 		 */
2851 
2852 		/*
2853 		 * Before we start, though, either all of the message should
2854 		 * be here, or enough of it (at least the endpoint's partial
2855 		 * delivery point) should have arrived.
2856 		 */
2857 		if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
2858 		    (tsize >= stcb->sctp_ep->partial_delivery_point))) {
2859 			asoc->fragmented_delivery_inprogress = 1;
2860 			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2861 			asoc->str_of_pdapi = chk->rec.data.stream_number;
2862 			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2863 			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2864 			asoc->fragment_flags = chk->rec.data.rcv_flags;
2865 			sctp_service_reassembly(stcb, asoc);
2866 			if (asoc->fragmented_delivery_inprogress == 0) {
2867 				goto doit_again;
2868 			}
2869 		}
2870 	}
2871 }
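
/*
 * PD-API trigger sketch (a summary of the checks above, not new policy):
 * partial delivery starts only when the first fragment of the next
 * deliverable message heads the reassembly queue AND either the whole
 * message is queued or at least partial_delivery_point bytes of it have
 * arrived; tsn_last_delivered is backed down to the first fragment's
 * TSN - 1 so sctp_service_reassembly() sees it as next to deliver.
 */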
2872 
2873 int
2874 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2875     struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2876     struct sctp_nets *net, uint32_t * high_tsn)
2877 {
2878 	struct sctp_data_chunk *ch, chunk_buf;
2879 	struct sctp_association *asoc;
2880 	int num_chunks = 0;	/* number of data chunks processed */
2881 	int stop_proc = 0;
2882 	int chk_length, break_flag, last_chunk;
2883 	int abort_flag = 0, was_a_gap = 0;
2884 	struct mbuf *m;
2885 
2886 	/* set the rwnd */
2887 	sctp_set_rwnd(stcb, &stcb->asoc);
2888 
2889 	m = *mm;
2890 	SCTP_TCB_LOCK_ASSERT(stcb);
2891 	asoc = &stcb->asoc;
2892 	if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2893 	    stcb->asoc.cumulative_tsn, MAX_TSN)) {
2894 		/* there was a gap before this data was processed */
2895 		was_a_gap = 1;
2896 	}
2897 	/*
2898 	 * setup where we got the last DATA packet from for any SACK that
2899 	 * may need to go out. Don't bump the net. This is done ONLY when a
2900 	 * chunk is assigned.
2901 	 */
2902 	asoc->last_data_chunk_from = net;
2903 
2904 	/*-
2905 	 * Now before we proceed we must figure out if this is a wasted
2906 	 * cluster... i.e. it is a small packet sent in and yet the driver
2907 	 * underneath allocated a full cluster for it. If so we must copy it
2908 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2909 	 * with cluster starvation. Note for __Panda__ we don't do this
2910 	 * since it has clusters all the way down to 64 bytes.
2911 	 */
2912 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2913 		/* we only handle mbufs that are singletons.. not chains */
2914 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2915 		if (m) {
2916 			/* ok, let's see if we can copy the data up */
2917 			caddr_t *from, *to;
2918 
2919 			/* get the pointers and copy */
2920 			to = mtod(m, caddr_t *);
2921 			from = mtod((*mm), caddr_t *);
2922 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2923 			/* copy the length and free up the old */
2924 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2925 			sctp_m_freem(*mm);
2926 			/* success, back copy */
2927 			*mm = m;
2928 		} else {
2929 			/* We are in trouble in the mbuf world .. yikes */
2930 			m = *mm;
2931 		}
2932 	}
2933 	/* get pointer to the first chunk header */
2934 	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2935 	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2936 	if (ch == NULL) {
2937 		return (1);
2938 	}
2939 	/*
2940 	 * process all DATA chunks...
2941 	 */
2942 	*high_tsn = asoc->cumulative_tsn;
2943 	break_flag = 0;
2944 	asoc->data_pkts_seen++;
2945 	while (stop_proc == 0) {
2946 		/* validate chunk length */
2947 		chk_length = ntohs(ch->ch.chunk_length);
2948 		if (length - *offset < chk_length) {
2949 			/* all done, mutilated chunk */
2950 			stop_proc = 1;
2951 			break;
2952 		}
2953 		if (ch->ch.chunk_type == SCTP_DATA) {
2954 			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2955 				/*
2956 				 * Need to send an abort since we had an
2957 				 * invalid data chunk.
2958 				 */
2959 				struct mbuf *op_err;
2960 
2961 				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2962 				    0, M_DONTWAIT, 1, MT_DATA);
2963 
2964 				if (op_err) {
2965 					struct sctp_paramhdr *ph;
2966 					uint32_t *ippp;
2967 
2968 					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2969 					    (2 * sizeof(uint32_t));
2970 					ph = mtod(op_err, struct sctp_paramhdr *);
2971 					ph->param_type =
2972 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2973 					ph->param_length = htons(SCTP_BUF_LEN(op_err));
2974 					ippp = (uint32_t *) (ph + 1);
2975 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2976 					ippp++;
2977 					*ippp = asoc->cumulative_tsn;
2978 
2979 				}
2980 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2981 				sctp_abort_association(inp, stcb, m, iphlen, sh,
2982 				    op_err, 0, net->port);
2983 				return (2);
2984 			}
2985 #ifdef SCTP_AUDITING_ENABLED
2986 			sctp_audit_log(0xB1, 0);
2987 #endif
2988 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2989 				last_chunk = 1;
2990 			} else {
2991 				last_chunk = 0;
2992 			}
2993 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2994 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2995 			    last_chunk)) {
2996 				num_chunks++;
2997 			}
2998 			if (abort_flag)
2999 				return (2);
3000 
3001 			if (break_flag) {
3002 				/*
3003 				 * Set because we ran out of rwnd space and
3004 				 * have no drop-report space left.
3005 				 */
3006 				stop_proc = 1;
3007 				break;
3008 			}
3009 		} else {
3010 			/* not a data chunk in the data region */
3011 			switch (ch->ch.chunk_type) {
3012 			case SCTP_INITIATION:
3013 			case SCTP_INITIATION_ACK:
3014 			case SCTP_SELECTIVE_ACK:
3015 			case SCTP_NR_SELECTIVE_ACK:	/* EY */
3016 			case SCTP_HEARTBEAT_REQUEST:
3017 			case SCTP_HEARTBEAT_ACK:
3018 			case SCTP_ABORT_ASSOCIATION:
3019 			case SCTP_SHUTDOWN:
3020 			case SCTP_SHUTDOWN_ACK:
3021 			case SCTP_OPERATION_ERROR:
3022 			case SCTP_COOKIE_ECHO:
3023 			case SCTP_COOKIE_ACK:
3024 			case SCTP_ECN_ECHO:
3025 			case SCTP_ECN_CWR:
3026 			case SCTP_SHUTDOWN_COMPLETE:
3027 			case SCTP_AUTHENTICATION:
3028 			case SCTP_ASCONF_ACK:
3029 			case SCTP_PACKET_DROPPED:
3030 			case SCTP_STREAM_RESET:
3031 			case SCTP_FORWARD_CUM_TSN:
3032 			case SCTP_ASCONF:
3033 				/*
3034 				 * Now, what do we do with KNOWN chunks that
3035 				 * are NOT in the right place?
3036 				 *
3037 				 * For now, I do nothing but ignore them. We
3038 				 * may later want to add sysctl stuff to
3039 				 * switch out and do either an ABORT() or
3040 				 * possibly process them.
3041 				 */
3042 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
3043 					struct mbuf *op_err;
3044 
3045 					op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
3046 					sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
3047 					return (2);
3048 				}
3049 				break;
3050 			default:
3051 				/* unknown chunk type, use bit rules */
3052 				if (ch->ch.chunk_type & 0x40) {
3053 					/* Add an error report to the queue */
3054 					struct mbuf *merr;
3055 					struct sctp_paramhdr *phd;
3056 
3057 					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
3058 					if (merr) {
3059 						phd = mtod(merr, struct sctp_paramhdr *);
3060 						/*
3061 						 * We cheat and use param
3062 						 * type since we did not
3063 						 * bother to define an error
3064 						 * cause struct. They are
3065 						 * the same basic format
3066 						 * with different names.
3067 						 */
3068 						phd->param_type =
3069 						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
3070 						phd->param_length =
3071 						    htons(chk_length + sizeof(*phd));
3072 						SCTP_BUF_LEN(merr) = sizeof(*phd);
3073 						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
3074 						    SCTP_SIZE32(chk_length),
3075 						    M_DONTWAIT);
3076 						if (SCTP_BUF_NEXT(merr)) {
3077 							sctp_queue_op_err(stcb, merr);
3078 						} else {
3079 							sctp_m_freem(merr);
3080 						}
3081 					}
3082 				}
3083 				if ((ch->ch.chunk_type & 0x80) == 0) {
3084 					/* discard the rest of this packet */
3085 					stop_proc = 1;
3086 				}	/* else skip this bad chunk and
3087 					 * continue... */
3088 				break;
3089 			}	/* switch on chunk type */
3090 		}
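		/*
		 * SCTP_SIZE32() rounds the chunk length up to a multiple
		 * of 4 (e.g. a 13-byte chunk advances the offset by 16),
		 * matching the RFC 4960 rule that chunks are padded out
		 * to a 32-bit boundary.
		 */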
3091 		*offset += SCTP_SIZE32(chk_length);
3092 		if ((*offset >= length) || stop_proc) {
3093 			/* no more data left in the mbuf chain */
3094 			stop_proc = 1;
3095 			continue;
3096 		}
3097 		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
3098 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
3099 		if (ch == NULL) {
3100 			*offset = length;
3101 			stop_proc = 1;
3102 			break;
3103 
3104 		}
3105 	}			/* while */
3106 	if (break_flag) {
3107 		/*
3108 		 * we need to report rwnd overrun drops.
3109 		 */
3110 		sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
3111 	}
3112 	if (num_chunks) {
3113 		/*
3114 		 * Did we get data? If so, update the time for auto-close and
3115 		 * give peer credit for being alive.
3116 		 */
3117 		SCTP_STAT_INCR(sctps_recvpktwithdata);
3118 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3119 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3120 			    stcb->asoc.overall_error_count,
3121 			    0,
3122 			    SCTP_FROM_SCTP_INDATA,
3123 			    __LINE__);
3124 		}
3125 		stcb->asoc.overall_error_count = 0;
3126 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
3127 	}
3128 	/* now service all of the reassm queue if needed */
3129 	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
3130 		sctp_service_queues(stcb, asoc);
3131 
3132 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
3133 		/* Assure that we ack right away */
3134 		stcb->asoc.send_sack = 1;
3135 	}
3136 	/* Start a sack timer or QUEUE a SACK for sending */
3137 	if ((stcb->asoc.cumulative_tsn == stcb->asoc.highest_tsn_inside_map) &&
3138 	    (stcb->asoc.mapping_array[0] != 0xff)) {
3139 		if ((stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) ||
3140 		    (stcb->asoc.delayed_ack == 0) ||
3141 		    (stcb->asoc.numduptsns) ||
3142 		    (stcb->asoc.send_sack == 1)) {
3143 			if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
3144 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
3145 			}
3146 			/*
3147 			 * EY: if NR-SACKs are in use then send an nr-sack;
3148 			 * otherwise send a sack
3149 			 */
3150 			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
3151 				sctp_send_nr_sack(stcb);
3152 			else
3153 				sctp_send_sack(stcb);
3154 		} else {
3155 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
3156 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
3157 				    stcb->sctp_ep, stcb, NULL);
3158 			}
3159 		}
3160 	} else {
3161 		sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
3162 	}
3163 	if (abort_flag)
3164 		return (2);
3165 
3166 	return (0);
3167 }
3168 
3169 static void
3170 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3171     struct sctp_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3172     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3173     int num_seg, int *ecn_seg_sums)
3174 {
3175 	/************************************************/
3176 	/* process fragments and update the sent queue  */
3177 	/************************************************/
3178 	struct sctp_sack *sack;
3179 	struct sctp_gap_ack_block *frag, block;
3180 	struct sctp_tmit_chunk *tp1;
3181 	int i, j;
3182 	unsigned int theTSN;
3183 	int num_frs = 0;
3184 
3185 	uint16_t frag_strt, frag_end, primary_flag_set;
3186 	u_long last_frag_high;
3187 
3188 	/*
3189 	 * @@@ JRI : TODO: This flag is not used anywhere .. remove?
3190 	 */
3191 	if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3192 		primary_flag_set = 1;
3193 	} else {
3194 		primary_flag_set = 0;
3195 	}
3196 	sack = &ch->sack;
3197 
3198 	frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3199 	    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3200 	*offset += sizeof(block);
3201 	if (frag == NULL) {
3202 		return;
3203 	}
3204 	tp1 = NULL;
3205 	last_frag_high = 0;
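	/*
	 * Each gap ack block carries start/end offsets relative to the
	 * cumulative TSN ack of this SACK; e.g. with cum-ack 1000, a block
	 * of [2,4] acks TSNs 1002..1004 (theTSN = j + last_tsn in the
	 * inner loop below).
	 */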
3206 	for (i = 0; i < num_seg; i++) {
3207 		frag_strt = ntohs(frag->start);
3208 		frag_end = ntohs(frag->end);
3209 		/* some sanity checks on the fragment offsets */
3210 		if (frag_strt > frag_end) {
3211 			/* this one is malformed, skip */
3212 			frag++;
3213 			continue;
3214 		}
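		/*
		 * compare_with_wrap() is serial-number arithmetic over the
		 * 32-bit TSN space: roughly, a compares "greater" than b
		 * when it is ahead of b by less than half the space, so
		 * e.g. TSN 5 is greater than 0xfffffff0 across the wrap.
		 */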
3215 		if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
3216 		    MAX_TSN))
3217 			*biggest_tsn_acked = frag_end + last_tsn;
3218 
3219 		/* mark acked datagrams and find the highest TSN being acked */
3220 		if (tp1 == NULL) {
3221 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3222 
3223 			/* save the locations of the last frags */
3224 			last_frag_high = frag_end + last_tsn;
3225 		} else {
3226 			/*
3227 			 * now let's see if we need to reset the queue due
3228 			 * to an out-of-order SACK fragment
3229 			 */
3230 			if (compare_with_wrap(frag_strt + last_tsn,
3231 			    last_frag_high, MAX_TSN)) {
3232 				/*
3233 				 * if the new frag starts after the last TSN
3234 				 * frag covered, we are ok and this one is
3235 				 * beyond the last one
3236 				 */
3237 				;
3238 			} else {
3239 				/*
3240 				 * ok, they have reset us, so we need to
3241 				 * reset the queue. This will cause extra
3242 				 * hunting, but hey, they chose the
3243 				 * performance hit when they failed to
3244 				 * order their gaps.
3245 				 */
3246 				tp1 = TAILQ_FIRST(&asoc->sent_queue);
3247 			}
3248 			last_frag_high = frag_end + last_tsn;
3249 		}
3250 		for (j = frag_strt; j <= frag_end; j++) {
3251 			theTSN = j + last_tsn;
3252 			while (tp1) {
3253 				if (tp1->rec.data.doing_fast_retransmit)
3254 					num_frs++;
3255 
3256 				/*
3257 				 * CMT: CUCv2 algorithm. For each TSN being
3258 				 * processed from the sent queue, track the
3259 				 * next expected pseudo-cumack, or
3260 				 * rtx_pseudo_cumack, if required. Separate
3261 				 * cumack trackers for first transmissions,
3262 				 * and retransmissions.
3263 				 */
3264 				if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
3265 				    (tp1->snd_count == 1)) {
3266 					tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
3267 					tp1->whoTo->find_pseudo_cumack = 0;
3268 				}
3269 				if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
3270 				    (tp1->snd_count > 1)) {
3271 					tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
3272 					tp1->whoTo->find_rtx_pseudo_cumack = 0;
3273 				}
3274 				if (tp1->rec.data.TSN_seq == theTSN) {
3275 					if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3276 						/*
3277 						 * must be held until
3278 						 * cum-ack passes
3279 						 */
3280 						/*
3281 						 * ECN Nonce: Add the nonce
3282 						 * value to the sender's
3283 						 * nonce sum
3284 						 */
3285 						if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3286 							/*-
3287 							 * If it is less than RESEND, it is
3288 							 * now no-longer in flight.
3289 							 * Higher values may already be set
3290 							 * via previous Gap Ack Blocks...
3291 							 * i.e. ACKED or RESEND.
3292 							 */
3293 							if (compare_with_wrap(tp1->rec.data.TSN_seq,
3294 							    *biggest_newly_acked_tsn, MAX_TSN)) {
3295 								*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
3296 							}
3297 							/*
3298 							 * CMT: SFR algo (and
3299 							 * HTNA) - set
3300 							 * saw_newack to 1 for
3301 							 * the dest being newly
3302 							 * acked; update
3303 							 * this_sack_highest_newack
3304 							 * if appropriate.
3307 							 */
3308 							if (tp1->rec.data.chunk_was_revoked == 0)
3309 								tp1->whoTo->saw_newack = 1;
3310 
3311 							if (compare_with_wrap(tp1->rec.data.TSN_seq,
3312 							    tp1->whoTo->this_sack_highest_newack,
3313 							    MAX_TSN)) {
3314 								tp1->whoTo->this_sack_highest_newack =
3315 								    tp1->rec.data.TSN_seq;
3316 							}
3317 							/*
3318 							 * CMT DAC algo: also
3319 							 * update
3320 							 * this_sack_lowest_newack
3322 							 */
3323 							if (*this_sack_lowest_newack == 0) {
3324 								if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3325 									sctp_log_sack(*this_sack_lowest_newack,
3326 									    last_tsn,
3327 									    tp1->rec.data.TSN_seq,
3328 									    0,
3329 									    0,
3330 									    SCTP_LOG_TSN_ACKED);
3331 								}
3332 								*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
3333 							}
3334 							/*
3335 							 * CMT: CUCv2 algorithm.
3336 							 * If the (rtx-)pseudo-cumack
3337 							 * for the corresponding
3338 							 * dest is being acked,
3339 							 * then we have a new
3340 							 * (rtx-)pseudo-cumack. Set
3341 							 * new_(rtx_)pseudo_cumack
3342 							 * to TRUE so that the
3343 							 * cwnd for this dest can
3344 							 * be updated. Also
3345 							 * trigger the search for
3346 							 * the next expected
3347 							 * (rtx-)pseudo-cumack.
3348 							 * Separate pseudo_cumack
3349 							 * trackers are kept for
3350 							 * first transmissions
3351 							 * and retransmissions.
3359 							 */
3360 							if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
3361 								if (tp1->rec.data.chunk_was_revoked == 0) {
3362 									tp1->whoTo->new_pseudo_cumack = 1;
3363 								}
3364 								tp1->whoTo->find_pseudo_cumack = 1;
3365 							}
3366 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3367 								sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3368 							}
3369 							if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
3370 								if (tp1->rec.data.chunk_was_revoked == 0) {
3371 									tp1->whoTo->new_pseudo_cumack = 1;
3372 								}
3373 								tp1->whoTo->find_rtx_pseudo_cumack = 1;
3374 							}
3375 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3376 								sctp_log_sack(*biggest_newly_acked_tsn,
3377 								    last_tsn,
3378 								    tp1->rec.data.TSN_seq,
3379 								    frag_strt,
3380 								    frag_end,
3381 								    SCTP_LOG_TSN_ACKED);
3382 							}
3383 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3384 								sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3385 								    tp1->whoTo->flight_size,
3386 								    tp1->book_size,
3387 								    (uintptr_t) tp1->whoTo,
3388 								    tp1->rec.data.TSN_seq);
3389 							}
3390 							sctp_flight_size_decrease(tp1);
3391 							sctp_total_flight_decrease(stcb, tp1);
3392 
3393 							tp1->whoTo->net_ack += tp1->send_size;
3394 							if (tp1->snd_count < 2) {
3395 								/*
3396 								 * True
3397 								 * non-retransmitted
3398 								 * chunk */
3400 								tp1->whoTo->net_ack2 += tp1->send_size;
3401 
3402 								/*
3403 								 * update RTO
3404 								 * too? */
3405 								if (tp1->do_rtt) {
3406 									tp1->whoTo->RTO =
3407 									    sctp_calculate_rto(stcb,
3408 									    asoc,
3409 									    tp1->whoTo,
3410 									    &tp1->sent_rcv_time,
3411 									    sctp_align_safe_nocopy);
3412 									tp1->do_rtt = 0;
3413 								}
3414 							}
3415 						}
3416 						if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3417 							(*ecn_seg_sums) += tp1->rec.data.ect_nonce;
3418 							(*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
3419 							if (compare_with_wrap(tp1->rec.data.TSN_seq,
3420 							    asoc->this_sack_highest_gap,
3421 							    MAX_TSN)) {
3422 								asoc->this_sack_highest_gap =
3423 								    tp1->rec.data.TSN_seq;
3424 							}
3425 							if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3426 								sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3427 #ifdef SCTP_AUDITING_ENABLED
3428 								sctp_audit_log(0xB2,
3429 								    (asoc->sent_queue_retran_cnt & 0x000000ff));
3430 #endif
3431 							}
3432 						}
3433 						/*
3434 						 * All chunks NOT UNSENT
3435 						 * fall through here and are
3436 						 * marked (leave PR-SCTP
3437 						 * ones that are to skip
3438 						 * alone though)
3439 						 */
3440 						if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
3441 							tp1->sent = SCTP_DATAGRAM_MARKED;
3442 
3443 						if (tp1->rec.data.chunk_was_revoked) {
3444 							/* deflate the cwnd */
3445 							tp1->whoTo->cwnd -= tp1->book_size;
3446 							tp1->rec.data.chunk_was_revoked = 0;
3447 						}
3448 					}
3449 					break;
3450 				}	/* if (tp1->TSN_seq == theTSN) */
3451 				if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
3452 				    MAX_TSN))
3453 					break;
3454 
3455 				tp1 = TAILQ_NEXT(tp1, sctp_next);
3456 			}	/* end while (tp1) */
3457 		}		/* end for (j = fragStart */
3458 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3459 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3460 		*offset += sizeof(block);
3461 		if (frag == NULL) {
3462 			break;
3463 		}
3464 	}
3465 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3466 		if (num_frs)
3467 			sctp_log_fr(*biggest_tsn_acked,
3468 			    *biggest_newly_acked_tsn,
3469 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3470 	}
3471 }
3472 
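/*
 * A TSN is "revoked" when a previous SACK gap-acked it but the current
 * SACK no longer covers it. Such chunks move back from ACKED to SENT,
 * re-entering flight-size accounting (with the cwnd inflated to
 * compensate), while chunks still covered go from MARKED to ACKED.
 */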
3473 static void
3474 sctp_check_for_revoked(struct sctp_tcb *stcb,
3475     struct sctp_association *asoc, uint32_t cumack,
3476     u_long biggest_tsn_acked)
3477 {
3478 	struct sctp_tmit_chunk *tp1;
3479 	int tot_revoked = 0;
3480 
3481 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3482 	while (tp1) {
3483 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
3484 		    MAX_TSN)) {
3485 			/*
3486 			 * ok this guy is either ACKED or MARKED. If it is
3487 			 * ACKED it has been previously acked but not this
3488 			 * time, i.e. revoked. If it is MARKED it was ACK'ed
3489 			 * again.
3490 			 */
3491 			if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3492 			    MAX_TSN))
3493 				break;
3494 
3496 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3497 				/* it has been revoked */
3498 				tp1->sent = SCTP_DATAGRAM_SENT;
3499 				tp1->rec.data.chunk_was_revoked = 1;
3500 				/*
3501 				 * We must add this stuff back in to assure
3502 				 * timers and such get started.
3503 				 */
3504 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3505 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3506 					    tp1->whoTo->flight_size,
3507 					    tp1->book_size,
3508 					    (uintptr_t) tp1->whoTo,
3509 					    tp1->rec.data.TSN_seq);
3510 				}
3511 				sctp_flight_size_increase(tp1);
3512 				sctp_total_flight_increase(stcb, tp1);
3513 				/*
3514 				 * We inflate the cwnd to compensate for our
3515 				 * artificial inflation of the flight_size.
3516 				 */
3517 				tp1->whoTo->cwnd += tp1->book_size;
3518 				tot_revoked++;
3519 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3520 					sctp_log_sack(asoc->last_acked_seq,
3521 					    cumack,
3522 					    tp1->rec.data.TSN_seq,
3523 					    0,
3524 					    0,
3525 					    SCTP_LOG_TSN_REVOKED);
3526 				}
3527 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3528 				/* it has been re-acked in this SACK */
3529 				tp1->sent = SCTP_DATAGRAM_ACKED;
3530 			}
3531 		}
3532 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3533 			break;
3534 		tp1 = TAILQ_NEXT(tp1, sctp_next);
3535 	}
3536 	if (tot_revoked > 0) {
3537 		/*
3538 		 * Setup the ecn nonce re-sync point. We do this since once
3539 		 * data is revoked we begin to retransmit things, which do
3540 		 * NOT have the ECN bits set. This means we are now out of
3541 		 * sync and must wait until we get back in sync with the
3542 		 * peer to check ECN bits.
3543 		 */
3544 		tp1 = TAILQ_FIRST(&asoc->send_queue);
3545 		if (tp1 == NULL) {
3546 			asoc->nonce_resync_tsn = asoc->sending_seq;
3547 		} else {
3548 			asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
3549 		}
3550 		asoc->nonce_wait_for_ecne = 0;
3551 		asoc->nonce_sum_check = 0;
3552 	}
3553 }
3554 
3555 
3556 static void
3557 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3558     u_long biggest_tsn_acked, u_long biggest_tsn_newly_acked, u_long this_sack_lowest_newack, int accum_moved)
3559 {
3560 	struct sctp_tmit_chunk *tp1;
3561 	int strike_flag = 0;
3562 	struct timeval now;
3563 	int tot_retrans = 0;
3564 	uint32_t sending_seq;
3565 	struct sctp_nets *net;
3566 	int num_dests_sacked = 0;
3567 
3568 	/*
3569 	 * select the sending_seq, this is either the next thing ready to be
3570 	 * sent but not transmitted, OR, the next seq we assign.
3571 	 */
3572 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3573 	if (tp1 == NULL) {
3574 		sending_seq = asoc->sending_seq;
3575 	} else {
3576 		sending_seq = tp1->rec.data.TSN_seq;
3577 	}
3578 
3579 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3580 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3581 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3582 			if (net->saw_newack)
3583 				num_dests_sacked++;
3584 		}
3585 	}
3586 	if (stcb->asoc.peer_supports_prsctp) {
3587 		(void)SCTP_GETTIME_TIMEVAL(&now);
3588 	}
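	/*
	 * Strike pass: each qualifying miss below bumps tp1->sent by one;
	 * with the usual state constants (SENT to RESEND is three steps) a
	 * chunk reaches SCTP_DATAGRAM_RESEND after roughly three strikes
	 * and is then set up for fast retransmit further down.
	 */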
3589 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3590 	while (tp1) {
3591 		strike_flag = 0;
3592 		if (tp1->no_fr_allowed) {
3593 			/* this one had a timeout or something */
3594 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3595 			continue;
3596 		}
3597 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3598 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3599 				sctp_log_fr(biggest_tsn_newly_acked,
3600 				    tp1->rec.data.TSN_seq,
3601 				    tp1->sent,
3602 				    SCTP_FR_LOG_CHECK_STRIKE);
3603 		}
3604 		if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3605 		    MAX_TSN) ||
3606 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3607 			/* done */
3608 			break;
3609 		}
3610 		if (stcb->asoc.peer_supports_prsctp) {
3611 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3612 				/* Is it expired? */
3613 				if (
3614 				/*
3615 				 * TODO sctp_constants.h needs alternative
3616 				 * time macros when _KERNEL is undefined.
3617 				 */
3618 				    (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
3619 				    ) {
3620 					/* Yes so drop it */
3621 					if (tp1->data != NULL) {
3622 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3623 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3624 						    SCTP_SO_NOT_LOCKED);
3625 					}
3626 					tp1 = TAILQ_NEXT(tp1, sctp_next);
3627 					continue;
3628 				}
3629 			}
3630 		}
3631 		if (compare_with_wrap(tp1->rec.data.TSN_seq,
3632 		    asoc->this_sack_highest_gap, MAX_TSN)) {
3633 			/* we are beyond the tsn in the sack  */
3634 			break;
3635 		}
3636 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3637 			/* either a RESEND, ACKED, or MARKED */
3638 			/* skip */
3639 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3640 			continue;
3641 		}
3642 		/*
3643 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3644 		 */
3645 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3646 			/*
3647 			 * No new acks were received for data sent to this
3648 			 * dest. Therefore, according to the SFR algo for
3649 			 * CMT, no data sent to this dest can be marked for
3650 			 * FR using this SACK.
3651 			 */
3652 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3653 			continue;
3654 		} else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq,
3655 		    tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
3656 			/*
3657 			 * CMT: New acks were received for data sent to
3658 			 * this dest. But no new acks were seen for data
3659 			 * sent after tp1. Therefore, according to the SFR
3660 			 * algo for CMT, tp1 cannot be marked for FR using
3661 			 * this SACK. This step covers part of the DAC algo
3662 			 * and the HTNA algo as well.
3663 			 */
3664 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3665 			continue;
3666 		}
3667 		/*
3668 		 * Here we check to see if we have already done a FR
3669 		 * and if so we see if the biggest TSN we saw in the sack is
3670 		 * smaller than the recovery point. If so we don't strike
3671 		 * the tsn... otherwise we CAN strike the TSN.
3672 		 */
3673 		/*
3674 		 * @@@ JRI: Check for CMT if (accum_moved &&
3675 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3676 		 * 0)) {
3677 		 */
3678 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3679 			/*
3680 			 * Strike the TSN if in fast-recovery and cum-ack
3681 			 * moved.
3682 			 */
3683 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3684 				sctp_log_fr(biggest_tsn_newly_acked,
3685 				    tp1->rec.data.TSN_seq,
3686 				    tp1->sent,
3687 				    SCTP_FR_LOG_STRIKE_CHUNK);
3688 			}
3689 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3690 				tp1->sent++;
3691 			}
3692 			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3693 				/*
3694 				 * CMT DAC algorithm: If SACK flag is set to
3695 				 * 0, then lowest_newack test will not pass
3696 				 * because it would have been set to the
3697 				 * cumack earlier. If not already to be
3698 				 * rtx'd, if not a mixed sack and if tp1 is
3699 				 * not between two sacked TSNs, then mark by
3700 				 * one more. NOTE that we are marking by one
3701 				 * additional time since the SACK DAC flag
3702 				 * indicates that two packets have been
3703 				 * received after this missing TSN.
3704 				 */
3705 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3706 				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3707 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3708 						sctp_log_fr(16 + num_dests_sacked,
3709 						    tp1->rec.data.TSN_seq,
3710 						    tp1->sent,
3711 						    SCTP_FR_LOG_STRIKE_CHUNK);
3712 					}
3713 					tp1->sent++;
3714 				}
3715 			}
3716 		} else if ((tp1->rec.data.doing_fast_retransmit) && (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
3717 			/*
3718 			 * For those that have done a FR we must take
3719 			 * special consideration if we strike. I.e the
3720 			 * biggest_newly_acked must be higher than the
3721 			 * sending_seq at the time we did the FR.
3722 			 */
3723 			if (
3724 #ifdef SCTP_FR_TO_ALTERNATE
3725 			/*
3726 			 * If FR's go to new networks, then we must only do
3727 			 * this for singly homed asoc's. However if the FR's
3728 			 * go to the same network (Armando's work) then it's
3729 			 * ok to FR multiple times.
3730 			 */
3731 			    (asoc->numnets < 2)
3732 #else
3733 			    (1)
3734 #endif
3735 			    ) {
3736 
3737 				if ((compare_with_wrap(biggest_tsn_newly_acked,
3738 				    tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3739 				    (biggest_tsn_newly_acked ==
3740 				    tp1->rec.data.fast_retran_tsn)) {
3741 					/*
3742 					 * Strike the TSN, since this ack is
3743 					 * beyond where things were when we
3744 					 * did a FR.
3745 					 */
3746 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3747 						sctp_log_fr(biggest_tsn_newly_acked,
3748 						    tp1->rec.data.TSN_seq,
3749 						    tp1->sent,
3750 						    SCTP_FR_LOG_STRIKE_CHUNK);
3751 					}
3752 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3753 						tp1->sent++;
3754 					}
3755 					strike_flag = 1;
3756 					if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3757 						/*
3758 						 * CMT DAC algorithm: If
3759 						 * SACK flag is set to 0,
3760 						 * then lowest_newack test
3761 						 * will not pass because it
3762 						 * would have been set to
3763 						 * the cumack earlier. If
3764 						 * not already to be rtx'd,
3765 						 * if not a mixed sack and
3766 						 * if tp1 is not between two
3767 						 * sacked TSNs, then mark by
3768 						 * one more. NOTE that we
3769 						 * are marking by one
3770 						 * additional time since the
3771 						 * SACK DAC flag indicates
3772 						 * that two packets have
3773 						 * been received after this
3774 						 * missing TSN.
3775 						 */
3776 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3777 						    (num_dests_sacked == 1) &&
3778 						    compare_with_wrap(this_sack_lowest_newack,
3779 						    tp1->rec.data.TSN_seq, MAX_TSN)) {
3780 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3781 								sctp_log_fr(32 + num_dests_sacked,
3782 								    tp1->rec.data.TSN_seq,
3783 								    tp1->sent,
3784 								    SCTP_FR_LOG_STRIKE_CHUNK);
3785 							}
3786 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3787 								tp1->sent++;
3788 							}
3789 						}
3790 					}
3791 				}
3792 			}
3793 			/*
3794 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3795 			 * algo covers HTNA.
3796 			 */
3797 		} else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3798 		    biggest_tsn_newly_acked, MAX_TSN)) {
3799 			/*
3800 			 * We don't strike these: This is the  HTNA
3801 			 * algorithm i.e. we don't strike If our TSN is
3802 			 * larger than the Highest TSN Newly Acked.
3803 			 */
3804 			;
3805 		} else {
3806 			/* Strike the TSN */
3807 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3808 				sctp_log_fr(biggest_tsn_newly_acked,
3809 				    tp1->rec.data.TSN_seq,
3810 				    tp1->sent,
3811 				    SCTP_FR_LOG_STRIKE_CHUNK);
3812 			}
3813 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3814 				tp1->sent++;
3815 			}
3816 			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3817 				/*
3818 				 * CMT DAC algorithm: If SACK flag is set to
3819 				 * 0, then lowest_newack test will not pass
3820 				 * because it would have been set to the
3821 				 * cumack earlier. If not already to be
3822 				 * rtx'd, if not a mixed sack and if tp1 is
3823 				 * not between two sacked TSNs, then mark by
3824 				 * one more. NOTE that we are marking by one
3825 				 * additional time since the SACK DAC flag
3826 				 * indicates that two packets have been
3827 				 * received after this missing TSN.
3828 				 */
3829 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3830 				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3831 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3832 						sctp_log_fr(48 + num_dests_sacked,
3833 						    tp1->rec.data.TSN_seq,
3834 						    tp1->sent,
3835 						    SCTP_FR_LOG_STRIKE_CHUNK);
3836 					}
3837 					tp1->sent++;
3838 				}
3839 			}
3840 		}
3841 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3842 			struct sctp_nets *alt;
3843 
3844 			/* fix counts and things */
3845 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3846 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3847 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3848 				    tp1->book_size,
3849 				    (uintptr_t) tp1->whoTo,
3850 				    tp1->rec.data.TSN_seq);
3851 			}
3852 			if (tp1->whoTo) {
3853 				tp1->whoTo->net_ack++;
3854 				sctp_flight_size_decrease(tp1);
3855 			}
3856 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3857 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3858 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3859 			}
3860 			/* add back to the rwnd */
3861 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3862 
3863 			/* remove from the total flight */
3864 			sctp_total_flight_decrease(stcb, tp1);
3865 
3866 			if ((stcb->asoc.peer_supports_prsctp) &&
3867 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3868 				/*
3869 				 * Has it been retransmitted tv_sec times? -
3870 				 * we store the retran count there.
3871 				 */
3872 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3873 					/* Yes, so drop it */
3874 					if (tp1->data != NULL) {
3875 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3876 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3877 						    SCTP_SO_NOT_LOCKED);
3878 					}
3879 					/* Make sure to flag we had a FR */
3880 					tp1->whoTo->net_ack++;
3881 					tp1 = TAILQ_NEXT(tp1, sctp_next);
3882 					continue;
3883 				}
3884 			}
3885 			/* printf("OK, we are now ready to FR this guy\n"); */
3886 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3887 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3888 				    0, SCTP_FR_MARKED);
3889 			}
3890 			if (strike_flag) {
3891 				/* This is a subsequent FR */
3892 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3893 			}
3894 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3895 			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
3896 				/*
3897 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3898 				 * If CMT is being used, then pick dest with
3899 				 * largest ssthresh for any retransmission.
3900 				 */
3901 				tp1->no_fr_allowed = 1;
3902 				alt = tp1->whoTo;
3903 				/* sa_ignore NO_NULL_CHK */
3904 				if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
3905 					/*
3906 					 * JRS 5/18/07 - If CMT PF is on,
3907 					 * use the PF version of
3908 					 * find_alt_net()
3909 					 */
3910 					alt = sctp_find_alternate_net(stcb, alt, 2);
3911 				} else {
3912 					/*
3913 					 * JRS 5/18/07 - If only CMT is on,
3914 					 * use the CMT version of
3915 					 * find_alt_net()
3916 					 */
3917 					/* sa_ignore NO_NULL_CHK */
3918 					alt = sctp_find_alternate_net(stcb, alt, 1);
3919 				}
3920 				if (alt == NULL) {
3921 					alt = tp1->whoTo;
3922 				}
3923 				/*
3924 				 * CUCv2: If a different dest is picked for
3925 				 * the retransmission, then new
3926 				 * (rtx-)pseudo_cumack needs to be tracked
3927 				 * for orig dest. Let CUCv2 track new (rtx-)
3928 				 * pseudo-cumack always.
3929 				 */
3930 				if (tp1->whoTo) {
3931 					tp1->whoTo->find_pseudo_cumack = 1;
3932 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3933 				}
3934 			} else {/* CMT is OFF */
3935 
3936 #ifdef SCTP_FR_TO_ALTERNATE
3937 				/* Can we find an alternate? */
3938 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3939 #else
3940 				/*
3941 				 * default behavior is to NOT retransmit
3942 				 * FR's to an alternate. Armando Caro's
3943 				 * paper details why.
3944 				 */
3945 				alt = tp1->whoTo;
3946 #endif
3947 			}
3948 
3949 			tp1->rec.data.doing_fast_retransmit = 1;
3950 			tot_retrans++;
3951 			/* mark the sending seq for possible subsequent FR's */
3952 			/*
3953 			 * printf("Marking TSN for FR new value %x\n",
3954 			 * (uint32_t)tpi->rec.data.TSN_seq);
3955 			 */
3956 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3957 				/*
3958 				 * If the send queue is empty then it's
3959 				 * the next sequence number that will be
3960 				 * assigned, so we subtract one from this
3961 				 * to get the one we last sent.
3962 				 */
3963 				tp1->rec.data.fast_retran_tsn = sending_seq;
3964 			} else {
3965 				/*
3966 				 * If there are chunks on the send queue
3967 				 * (unsent data that has made it from the
3968 				 * stream queues but not out the door), we
3969 				 * take the first one (which will have the
3970 				 * lowest TSN) and subtract one to get the
3971 				 * one we last sent.
3972 				 */
3973 				struct sctp_tmit_chunk *ttt;
3974 
3975 				ttt = TAILQ_FIRST(&asoc->send_queue);
3976 				tp1->rec.data.fast_retran_tsn =
3977 				    ttt->rec.data.TSN_seq;
3978 			}
3979 
3980 			if (tp1->do_rtt) {
3981 				/*
3982 				 * this guy had a RTO calculation pending on
3983 				 * it, cancel it
3984 				 */
3985 				tp1->do_rtt = 0;
3986 			}
3987 			if (alt != tp1->whoTo) {
3988 				/* yes, there is an alternate. */
3989 				sctp_free_remote_addr(tp1->whoTo);
3990 				/* sa_ignore FREED_MEMORY */
3991 				tp1->whoTo = alt;
3992 				atomic_add_int(&alt->ref_count, 1);
3993 			}
3994 		}
3995 		tp1 = TAILQ_NEXT(tp1, sctp_next);
3996 	}			/* while (tp1) */
3997 
3998 	if (tot_retrans > 0) {
3999 		/*
4000 		 * Setup the ecn nonce re-sync point. We do this since once
4001 		 * we go to FR something we introduce a Karn's rule scenario
4002 		 * and won't know the totals for the ECN bits.
4003 		 */
4004 		asoc->nonce_resync_tsn = sending_seq;
4005 		asoc->nonce_wait_for_ecne = 0;
4006 		asoc->nonce_sum_check = 0;
4007 	}
4008 }
4009 
4010 struct sctp_tmit_chunk *
4011 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
4012     struct sctp_association *asoc)
4013 {
4014 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
4015 	struct timeval now;
4016 	int now_filled = 0;
4017 
4018 	if (asoc->peer_supports_prsctp == 0) {
4019 		return (NULL);
4020 	}
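	/*
	 * For example: if the sent queue holds TSNs 10 (skip), 11 (skip),
	 * 12 (still in flight), all PR-SCTP, the loop below advances
	 * advanced_peer_ack_point to 11 and returns the TSN 11 chunk so
	 * the caller can bundle a FWD-TSN reporting 11; TSN 12 blocks any
	 * further advance.
	 */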
4021 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
4022 	while (tp1) {
4023 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
4024 		    tp1->sent != SCTP_DATAGRAM_RESEND) {
4025 			/* no chance to advance, out of here */
4026 			break;
4027 		}
4028 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
4029 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
4030 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
4031 				    asoc->advanced_peer_ack_point,
4032 				    tp1->rec.data.TSN_seq, 0, 0);
4033 			}
4034 		}
4035 		if (!PR_SCTP_ENABLED(tp1->flags)) {
4036 			/*
4037 			 * We can't fwd-tsn past any that are reliable aka
4038 			 * retransmitted until the asoc fails.
4039 			 */
4040 			break;
4041 		}
4042 		if (!now_filled) {
4043 			(void)SCTP_GETTIME_TIMEVAL(&now);
4044 			now_filled = 1;
4045 		}
4046 		tp2 = TAILQ_NEXT(tp1, sctp_next);
4047 		/*
4048 		 * now we have a chunk which is marked for another
4049 		 * retransmission to a PR-stream, but it may have run out
4050 		 * of chances already OR has been marked to skip now. Can
4051 		 * we skip it if it's a resend?
4052 		 */
4053 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
4054 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
4055 			/*
4056 			 * Now is this one marked for resend and its time is
4057 			 * now up?
4058 			 */
4059 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
4060 				/* Yes so drop it */
4061 				if (tp1->data) {
4062 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
4063 					    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
4064 					    SCTP_SO_NOT_LOCKED);
4065 				}
4066 			} else {
4067 				/*
4068 				 * No, we are done when we hit one marked
4069 				 * for resend whose time has not expired.
4070 				 */
4071 				break;
4072 			}
4073 		}
4074 		/*
4075 		 * Ok now if this chunk is marked to drop it we can clean up
4076 		 * the chunk, advance our peer ack point and we can check
4077 		 * the next chunk.
4078 		 */
4079 		if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
4080 			/* advance PeerAckPoint goes forward */
4081 			if (compare_with_wrap(tp1->rec.data.TSN_seq,
4082 			    asoc->advanced_peer_ack_point,
4083 			    MAX_TSN)) {
4084 
4085 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
4086 				a_adv = tp1;
4087 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
4088 				/* No update but we do save the chk */
4089 				a_adv = tp1;
4090 			}
4091 		} else {
4092 			/*
4093 			 * If it is still in RESEND we can advance no
4094 			 * further
4095 			 */
4096 			break;
4097 		}
4098 		/*
4099 		 * If we hit here we just dumped tp1, move to next tsn on
4100 		 * sent queue.
4101 		 */
4102 		tp1 = tp2;
4103 	}
4104 	return (a_adv);
4105 }
4106 
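/*
 * Consistency audit of the flight-size bookkeeping: walk the sent queue
 * and classify every chunk by its 'sent' state. Nothing should still be
 * counted in flight (or sit between RESEND and ACKED) when this runs;
 * under INVARIANTS a violation panics, otherwise it is logged and
 * reported so the caller can rebuild the counters.
 */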
4107 static int
4108 sctp_fs_audit(struct sctp_association *asoc)
4109 {
4110 	struct sctp_tmit_chunk *chk;
4111 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
4112 	int entry_flight, entry_cnt, ret;
4113 
4114 	entry_flight = asoc->total_flight;
4115 	entry_cnt = asoc->total_flight_count;
4116 	ret = 0;
4117 
4118 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
4119 		return (0);
4120 
4121 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
4122 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
4123 			printf("Chk TSN:%u size:%d inflight cnt:%d\n",
4124 			    chk->rec.data.TSN_seq,
4125 			    chk->send_size,
4126 			    chk->snd_count
4127 			    );
4128 			inflight++;
4129 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
4130 			resend++;
4131 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
4132 			inbetween++;
4133 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
4134 			above++;
4135 		} else {
4136 			acked++;
4137 		}
4138 	}
4139 
4140 	if ((inflight > 0) || (inbetween > 0)) {
4141 #ifdef INVARIANTS
4142 		panic("Flight size-express incorrect? \n");
4143 #else
4144 		printf("asoc->total_flight:%d cnt:%d\n",
4145 		    entry_flight, entry_cnt);
4146 
4147 		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
4148 		    inflight, inbetween, resend, above, acked);
4149 		ret = 1;
4150 #endif
4151 	}
4152 	return (ret);
4153 }
4154 
4155 
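/*
 * A window probe was sent while the peer advertised a zero rwnd. Once
 * the window reopens, pull the probe chunk back out of the in-flight
 * accounting and mark it RESEND so it goes out again via the normal
 * retransmit path; chunks already ACKED (or with no data) are skipped.
 */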
4156 static void
4157 sctp_window_probe_recovery(struct sctp_tcb *stcb,
4158     struct sctp_association *asoc,
4159     struct sctp_nets *net,
4160     struct sctp_tmit_chunk *tp1)
4161 {
4162 	tp1->window_probe = 0;
4163 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
4164 		/* TSN's skipped we do NOT move back. */
4165 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
4166 		    tp1->whoTo->flight_size,
4167 		    tp1->book_size,
4168 		    (uintptr_t) tp1->whoTo,
4169 		    tp1->rec.data.TSN_seq);
4170 		return;
4171 	}
4172 	/* First setup this by shrinking flight */
4173 	sctp_flight_size_decrease(tp1);
4174 	sctp_total_flight_decrease(stcb, tp1);
4175 	/* Now mark for resend */
4176 	tp1->sent = SCTP_DATAGRAM_RESEND;
4177 	asoc->sent_queue_retran_cnt++;
4178 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4179 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
4180 		    tp1->whoTo->flight_size,
4181 		    tp1->book_size,
4182 		    (uintptr_t) tp1->whoTo,
4183 		    tp1->rec.data.TSN_seq);
4184 	}
4185 }
4186 
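/*
 * Express SACK path: handles SACKs that move only the cumulative ack
 * (no gap-ack blocks and no duplicate-TSN reports to process), so the
 * sent queue can simply be drained from the front without the full
 * slow-path work done in sctp_handle_sack().
 */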
4187 void
4188 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
4189     uint32_t rwnd, int nonce_sum_flag, int *abort_now)
4190 {
4191 	struct sctp_nets *net;
4192 	struct sctp_association *asoc;
4193 	struct sctp_tmit_chunk *tp1, *tp2;
4194 	uint32_t old_rwnd;
4195 	int win_probe_recovery = 0;
4196 	int win_probe_recovered = 0;
4197 	int j, done_once = 0;
4198 
4199 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4200 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
4201 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4202 	}
4203 	SCTP_TCB_LOCK_ASSERT(stcb);
4204 #ifdef SCTP_ASOCLOG_OF_TSNS
4205 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
4206 	stcb->asoc.cumack_log_at++;
4207 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4208 		stcb->asoc.cumack_log_at = 0;
4209 	}
4210 #endif
4211 	asoc = &stcb->asoc;
4212 	old_rwnd = asoc->peers_rwnd;
4213 	if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
4214 		/* old ack */
4215 		return;
4216 	} else if (asoc->last_acked_seq == cumack) {
4217 		/* Window update sack */
4218 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4219 		    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4220 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4221 			/* SWS sender side engages */
4222 			asoc->peers_rwnd = 0;
4223 		}
4224 		if (asoc->peers_rwnd > old_rwnd) {
4225 			goto again;
4226 		}
4227 		return;
4228 	}
4229 	/* First setup for CC stuff */
4230 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4231 		net->prev_cwnd = net->cwnd;
4232 		net->net_ack = 0;
4233 		net->net_ack2 = 0;
4234 
4235 		/*
4236 		 * CMT: Reset CUC and Fast recovery algo variables before
4237 		 * SACK processing
4238 		 */
4239 		net->new_pseudo_cumack = 0;
4240 		net->will_exit_fast_recovery = 0;
4241 	}
4242 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4243 		uint32_t send_s;
4244 
4245 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4246 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4247 			    sctpchunk_listhead);
4248 			send_s = tp1->rec.data.TSN_seq + 1;
4249 		} else {
4250 			send_s = asoc->sending_seq;
4251 		}
4252 		if ((cumack == send_s) ||
4253 		    compare_with_wrap(cumack, send_s, MAX_TSN)) {
4254 #ifndef INVARIANTS
4255 			struct mbuf *oper;
4256 
4257 #endif
4258 #ifdef INVARIANTS
4259 			panic("Impossible sack 1");
4260 #else
4261 			*abort_now = 1;
4262 			/* XXX */
4263 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4264 			    0, M_DONTWAIT, 1, MT_DATA);
4265 			if (oper) {
4266 				struct sctp_paramhdr *ph;
4267 				uint32_t *ippp;
4268 
4269 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4270 				    sizeof(uint32_t);
4271 				ph = mtod(oper, struct sctp_paramhdr *);
4272 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4273 				ph->param_length = htons(SCTP_BUF_LEN(oper));
4274 				ippp = (uint32_t *) (ph + 1);
4275 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4276 			}
4277 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4278 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4279 			return;
4280 #endif
4281 		}
4282 	}
4283 	asoc->this_sack_highest_gap = cumack;
4284 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4285 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4286 		    stcb->asoc.overall_error_count,
4287 		    0,
4288 		    SCTP_FROM_SCTP_INDATA,
4289 		    __LINE__);
4290 	}
4291 	stcb->asoc.overall_error_count = 0;
4292 	if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
4293 		/* process the new consecutive TSN first */
4294 		tp1 = TAILQ_FIRST(&asoc->sent_queue);
4295 		while (tp1) {
4296 			tp2 = TAILQ_NEXT(tp1, sctp_next);
4297 			if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
4298 			    MAX_TSN) ||
4299 			    cumack == tp1->rec.data.TSN_seq) {
4300 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4301 					printf("Warning, an unsent is now acked?\n");
4302 				}
4303 				/*
4304 				 * ECN Nonce: Add the nonce to the sender's
4305 				 * nonce sum
4306 				 */
4307 				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4308 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4309 					/*
4310 					 * If it is less than ACKED, it is
4311 					 * now no-longer in flight. Higher
4312 					 * values may occur during marking
4313 					 */
4314 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4315 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4316 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4317 							    tp1->whoTo->flight_size,
4318 							    tp1->book_size,
4319 							    (uintptr_t) tp1->whoTo,
4320 							    tp1->rec.data.TSN_seq);
4321 						}
4322 						sctp_flight_size_decrease(tp1);
4323 						/* sa_ignore NO_NULL_CHK */
4324 						sctp_total_flight_decrease(stcb, tp1);
4325 					}
4326 					tp1->whoTo->net_ack += tp1->send_size;
4327 					if (tp1->snd_count < 2) {
4328 						/*
4329 						 * True non-retransmitted
4330 						 * chunk
4331 						 */
4332 						tp1->whoTo->net_ack2 +=
4333 						    tp1->send_size;
4334 
4335 						/* update RTO too? */
4336 						if (tp1->do_rtt) {
4337 							tp1->whoTo->RTO =
4338 							/*
4339 							 * sa_ignore
4340 							 * NO_NULL_CHK
4341 							 */
4342 							    sctp_calculate_rto(stcb,
4343 							    asoc, tp1->whoTo,
4344 							    &tp1->sent_rcv_time,
4345 							    sctp_align_safe_nocopy);
4346 							tp1->do_rtt = 0;
4347 						}
4348 					}
4349 					/*
4350 					 * CMT: CUCv2 algorithm. From the
4351 					 * cumack'd TSNs, for each TSN being
4352 					 * acked for the first time, set the
4353 					 * following variables for the
4354 					 * corresp destination.
4355 					 * new_pseudo_cumack will trigger a
4356 					 * cwnd update.
4357 					 * find_(rtx_)pseudo_cumack will
4358 					 * trigger search for the next
4359 					 * expected (rtx-)pseudo-cumack.
4360 					 */
4361 					tp1->whoTo->new_pseudo_cumack = 1;
4362 					tp1->whoTo->find_pseudo_cumack = 1;
4363 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4364 
4365 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4366 						/* sa_ignore NO_NULL_CHK */
4367 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4368 					}
4369 				}
4370 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4371 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4372 				}
4373 				if (tp1->rec.data.chunk_was_revoked) {
4374 					/* deflate the cwnd */
4375 					tp1->whoTo->cwnd -= tp1->book_size;
4376 					tp1->rec.data.chunk_was_revoked = 0;
4377 				}
4378 				tp1->sent = SCTP_DATAGRAM_ACKED;
4379 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4380 				if (tp1->data) {
4381 					/* sa_ignore NO_NULL_CHK */
4382 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4383 					sctp_m_freem(tp1->data);
4384 				}
4385 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4386 					sctp_log_sack(asoc->last_acked_seq,
4387 					    cumack,
4388 					    tp1->rec.data.TSN_seq,
4389 					    0,
4390 					    0,
4391 					    SCTP_LOG_FREE_SENT);
4392 				}
4393 				tp1->data = NULL;
4394 				asoc->sent_queue_cnt--;
4395 				sctp_free_a_chunk(stcb, tp1);
4396 				tp1 = tp2;
4397 			} else {
4398 				break;
4399 			}
4400 		}
4401 
4402 	}
4403 	/* sa_ignore NO_NULL_CHK */
4404 	if (stcb->sctp_socket) {
4405 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4406 		struct socket *so;
4407 
4408 #endif
4409 
4410 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4411 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4412 			/* sa_ignore NO_NULL_CHK */
4413 			sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
4414 		}
4415 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4416 		so = SCTP_INP_SO(stcb->sctp_ep);
4417 		atomic_add_int(&stcb->asoc.refcnt, 1);
4418 		SCTP_TCB_UNLOCK(stcb);
4419 		SCTP_SOCKET_LOCK(so, 1);
4420 		SCTP_TCB_LOCK(stcb);
4421 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4422 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4423 			/* assoc was freed while we were unlocked */
4424 			SCTP_SOCKET_UNLOCK(so, 1);
4425 			return;
4426 		}
4427 #endif
4428 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4429 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4430 		SCTP_SOCKET_UNLOCK(so, 1);
4431 #endif
4432 	} else {
4433 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4434 			sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
4435 		}
4436 	}
4437 
4438 	/* JRS - Use the congestion control given in the CC module */
4439 	if (asoc->last_acked_seq != cumack)
4440 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4441 
4442 	asoc->last_acked_seq = cumack;
4443 
4444 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4445 		/* nothing left in-flight */
4446 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4447 			net->flight_size = 0;
4448 			net->partial_bytes_acked = 0;
4449 		}
4450 		asoc->total_flight = 0;
4451 		asoc->total_flight_count = 0;
4452 	}
4453 	/* ECN Nonce updates */
4454 	if (asoc->ecn_nonce_allowed) {
4455 		if (asoc->nonce_sum_check) {
4456 			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
4457 				if (asoc->nonce_wait_for_ecne == 0) {
4458 					struct sctp_tmit_chunk *lchk;
4459 
4460 					lchk = TAILQ_FIRST(&asoc->send_queue);
4461 					asoc->nonce_wait_for_ecne = 1;
4462 					if (lchk) {
4463 						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4464 					} else {
4465 						asoc->nonce_wait_tsn = asoc->sending_seq;
4466 					}
4467 				} else {
4468 					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4469 					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4470 						/*
4471 						 * Misbehaving peer. We need
4472 						 * to react to this guy
4473 						 */
4474 						asoc->ecn_allowed = 0;
4475 						asoc->ecn_nonce_allowed = 0;
4476 					}
4477 				}
4478 			}
4479 		} else {
4480 			/* See if Resynchronization Possible */
4481 			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4482 				asoc->nonce_sum_check = 1;
4483 				/*
4484 				 * now we must calculate what the base is.
4485 				 * We do this based on two things: we know
4486 				 * the totals for all the segments
4487 				 * gap-acked in the SACK (none), and we also
4488 				 * know the SACK's nonce sum, it's in
4489 				 * nonce_sum_flag. So we can build a truth
4490 				 * table to back-calculate the new value of
4491 				 * asoc->nonce_sum_expect_base:
4492 				 *
4493 				 * (SACK-flag, Seg-Sums) -> Base:
4494 				 * (0,0) -> 0    (1,0) -> 1
4495 				 * (0,1) -> 1    (1,1) -> 0
4496 				 */
4497 				asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
4498 			}
4499 		}
4500 	}
4501 	/* RWND update */
4502 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4503 	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4504 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4505 		/* SWS sender side engages */
4506 		asoc->peers_rwnd = 0;
4507 	}
4508 	if (asoc->peers_rwnd > old_rwnd) {
4509 		win_probe_recovery = 1;
4510 	}
4511 	/* Now assure a timer where data is queued at */
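	/*
	 * j counts destinations that still have data in flight. If none
	 * do, yet the sent queue is non-empty, nothing is marked for
	 * retransmit and no window probe was recovered, the flight-size
	 * audit below rebuilds the counters and we retry once (done_once
	 * guards against looping).
	 */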
4512 again:
4513 	j = 0;
4514 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4515 		int to_ticks;
4516 
4517 		if (win_probe_recovery && (net->window_probe)) {
4518 			win_probe_recovered = 1;
4519 			/*
4520 			 * Find first chunk that was used with window probe
4521 			 * and clear the sent
4522 			 */
4523 			/* sa_ignore FREED_MEMORY */
4524 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4525 				if (tp1->window_probe) {
4526 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
4527 					break;
4528 				}
4529 			}
4530 		}
4531 		if (net->RTO == 0) {
4532 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4533 		} else {
4534 			to_ticks = MSEC_TO_TICKS(net->RTO);
4535 		}
4536 		if (net->flight_size) {
4537 			j++;
4538 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4539 			    sctp_timeout_handler, &net->rxt_timer);
4540 			if (net->window_probe) {
4541 				net->window_probe = 0;
4542 			}
4543 		} else {
4544 			if (net->window_probe) {
4545 				/*
4546 				 * In window probes we must assure a timer
4547 				 * is still running there
4548 				 */
4549 				net->window_probe = 0;
4550 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4551 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4552 					    sctp_timeout_handler, &net->rxt_timer);
4553 				}
4554 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4555 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4556 				    stcb, net,
4557 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4558 			}
4559 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4560 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4561 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4562 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4563 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4564 				}
4565 			}
4566 		}
4567 	}
4568 	if ((j == 0) &&
4569 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4570 	    (asoc->sent_queue_retran_cnt == 0) &&
4571 	    (win_probe_recovered == 0) &&
4572 	    (done_once == 0)) {
4573 		/*
4574 		 * huh, this should not happen unless all packets are
4575 		 * PR-SCTP and marked to skip of course.
4576 		 */
4577 		if (sctp_fs_audit(asoc)) {
4578 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4579 				if (net->flight_size) {
4580 					net->flight_size = 0;
4581 				}
4582 			}
4583 			asoc->total_flight = 0;
4584 			asoc->total_flight_count = 0;
4585 			asoc->sent_queue_retran_cnt = 0;
4586 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4587 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4588 					sctp_flight_size_increase(tp1);
4589 					sctp_total_flight_increase(stcb, tp1);
4590 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4591 					asoc->sent_queue_retran_cnt++;
4592 				}
4593 			}
4594 		}
4595 		done_once = 1;
4596 		goto again;
4597 	}
4598 	/**********************************/
4599 	/* Now what about shutdown issues */
4600 	/**********************************/
4601 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4602 		/* nothing left on sendqueue.. consider done */
4603 		/* clean up */
4604 		if ((asoc->stream_queue_cnt == 1) &&
4605 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4606 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4607 		    (asoc->locked_on_sending)
4608 		    ) {
4609 			struct sctp_stream_queue_pending *sp;
4610 
4611 			/*
4612 			 * I may be in a state where we got all across.. but
4613 			 * cannot write more due to a shutdown... we abort
4614 			 * since the user did not indicate EOR in this case.
4615 			 * The sp will be cleaned during free of the asoc.
4616 			 */
4617 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4618 			    sctp_streamhead);
4619 			if ((sp) && (sp->length == 0)) {
4620 				/* Let cleanup code purge it */
4621 				if (sp->msg_is_complete) {
4622 					asoc->stream_queue_cnt--;
4623 				} else {
4624 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4625 					asoc->locked_on_sending = NULL;
4626 					asoc->stream_queue_cnt--;
4627 				}
4628 			}
4629 		}
4630 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4631 		    (asoc->stream_queue_cnt == 0)) {
4632 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4633 				/* Need to abort here */
4634 				struct mbuf *oper;
4635 
4636 		abort_out_now:
4637 				*abort_now = 1;
4638 				/* XXX */
4639 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4640 				    0, M_DONTWAIT, 1, MT_DATA);
4641 				if (oper) {
4642 					struct sctp_paramhdr *ph;
4643 					uint32_t *ippp;
4644 
4645 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4646 					    sizeof(uint32_t);
4647 					ph = mtod(oper, struct sctp_paramhdr *);
4648 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4649 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4650 					ippp = (uint32_t *) (ph + 1);
4651 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4652 				}
4653 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4654 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4655 			} else {
4656 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4657 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4658 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4659 				}
4660 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4661 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4662 				sctp_stop_timers_for_shutdown(stcb);
4663 				sctp_send_shutdown(stcb,
4664 				    stcb->asoc.primary_destination);
4665 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4666 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4667 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4668 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4669 			}
4670 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4671 		    (asoc->stream_queue_cnt == 0)) {
4672 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4673 				goto abort_out_now;
4674 			}
4675 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4676 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4677 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4678 			sctp_send_shutdown_ack(stcb,
4679 			    stcb->asoc.primary_destination);
4680 
4681 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4682 			    stcb->sctp_ep, stcb, asoc->primary_destination);
4683 		}
4684 	}
4685 	/*********************************************/
4686 	/* Here we perform PR-SCTP procedures        */
4687 	/* (section 4.2)                             */
4688 	/*********************************************/
4689 	/* C1. update advancedPeerAckPoint */
4690 	if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4691 		asoc->advanced_peer_ack_point = cumack;
4692 	}
4693 	/* PR-SCTP issues need to be addressed too */
4694 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4695 		struct sctp_tmit_chunk *lchk;
4696 		uint32_t old_adv_peer_ack_point;
4697 
4698 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4699 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4700 		/* C3. See if we need to send a Fwd-TSN */
4701 		if (compare_with_wrap(asoc->advanced_peer_ack_point, cumack,
4702 		    MAX_TSN)) {
4703 			/*
4704 			 * ISSUE with ECN, see FWD-TSN processing for notes
4705 			 * on issues that will occur when the ECN NONCE
4706 			 * stuff is put into SCTP for cross checking.
4707 			 */
4708 			if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
4709 			    MAX_TSN)) {
4710 				send_forward_tsn(stcb, asoc);
4711 				/*
4712 				 * ECN Nonce: Disable Nonce Sum check when
4713 				 * FWD TSN is sent and store resync tsn
4714 				 */
4715 				asoc->nonce_sum_check = 0;
4716 				asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
4717 			} else if (lchk) {
4718 				/* try to FR fwd-tsn's that get lost too */
4719 				lchk->rec.data.fwd_tsn_cnt++;
4720 				if (lchk->rec.data.fwd_tsn_cnt > 3) {
4721 					send_forward_tsn(stcb, asoc);
4722 					lchk->rec.data.fwd_tsn_cnt = 0;
4723 				}
4724 			}
4725 		}
4726 		if (lchk) {
4727 			/* Assure a timer is up */
4728 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4729 			    stcb->sctp_ep, stcb, lchk->whoTo);
4730 		}
4731 	}
4732 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4733 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4734 		    rwnd,
4735 		    stcb->asoc.peers_rwnd,
4736 		    stcb->asoc.total_flight,
4737 		    stcb->asoc.total_output_queue_size);
4738 	}
4739 }
4740 
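/*
 * Slow-path SACK handling: unlike the express path above, this also
 * walks gap-ack blocks, duplicate-TSN reports, revocation and
 * fast-retransmit strike state in full.
 */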
4741 void
4742 sctp_handle_sack(struct mbuf *m, int offset,
4743     struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
4744     struct sctp_nets *net_from, int *abort_now, int sack_len, uint32_t rwnd)
4745 {
4746 	struct sctp_association *asoc;
4747 	struct sctp_sack *sack;
4748 	struct sctp_tmit_chunk *tp1, *tp2;
4749 	uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
4750 	         this_sack_lowest_newack;
4751 	uint32_t sav_cum_ack;
4752 	uint16_t num_seg, num_dup;
4753 	uint16_t wake_him = 0;
4754 	unsigned int sack_length;
4755 	uint32_t send_s = 0;
4756 	long j;
4757 	int accum_moved = 0;
4758 	int will_exit_fast_recovery = 0;
4759 	uint32_t a_rwnd, old_rwnd;
4760 	int win_probe_recovery = 0;
4761 	int win_probe_recovered = 0;
4762 	struct sctp_nets *net = NULL;
4763 	int nonce_sum_flag, ecn_seg_sums = 0;
4764 	int done_once;
4765 	uint8_t reneged_all = 0;
4766 	uint8_t cmt_dac_flag;
4767 
	/*
	 * we take any chance we can to service our queues since we cannot
	 * get awoken when the socket is read from :<
	 */
	/*
	 * Now perform the actual SACK handling:
	 *
	 * 1) Verify that it is not an old sack; if so, discard.
	 * 2) If there is nothing left in the send queue (cum-ack equals the
	 *    last acked TSN) then this is a duplicate too: apply any rwnd
	 *    change, verify no timers are running, then return.
	 * 3) Process any new consecutive data, i.e. the cum-ack moved;
	 *    process these first and note that it moved.
	 * 4) Process any sack blocks.
	 * 5) Drop any acked chunks from the queue.
	 * 6) Check for any revoked blocks and mark them.
	 * 7) Update the cwnd.
	 * 8) If nothing is left, sync up flight sizes and such, stop all
	 *    timers, and check for the shutdown_pending state; if set, go
	 *    ahead and send off the shutdown. If in shutdown-received, send
	 *    off the shutdown-ack, start that timer, and return.
	 * 9) Strike any non-acked chunks and do the FR procedure if needed,
	 *    being sure to set the FR flag.
	 * 10) Do PR-SCTP procedures.
	 * 11) Apply any FR penalties.
	 * 12) Assure we will SACK if in the shutdown-received state.
	 */
4789 	SCTP_TCB_LOCK_ASSERT(stcb);
4790 	sack = &ch->sack;
4791 	/* CMT DAC algo */
4792 	this_sack_lowest_newack = 0;
4793 	j = 0;
4794 	sack_length = (unsigned int)sack_len;
4795 	/* ECN Nonce */
4796 	SCTP_STAT_INCR(sctps_slowpath_sack);
4797 	nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
4798 	cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
4799 #ifdef SCTP_ASOCLOG_OF_TSNS
4800 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4801 	stcb->asoc.cumack_log_at++;
4802 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4803 		stcb->asoc.cumack_log_at = 0;
4804 	}
4805 #endif
4806 	num_seg = ntohs(sack->num_gap_ack_blks);
4807 	a_rwnd = rwnd;
4808 
4809 	/* CMT DAC algo */
4810 	cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
4811 	num_dup = ntohs(sack->num_dup_tsns);
4812 
4813 	old_rwnd = stcb->asoc.peers_rwnd;
4814 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4815 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4816 		    stcb->asoc.overall_error_count,
4817 		    0,
4818 		    SCTP_FROM_SCTP_INDATA,
4819 		    __LINE__);
4820 	}
4821 	stcb->asoc.overall_error_count = 0;
4822 	asoc = &stcb->asoc;
4823 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4824 		sctp_log_sack(asoc->last_acked_seq,
4825 		    cum_ack,
4826 		    0,
4827 		    num_seg,
4828 		    num_dup,
4829 		    SCTP_LOG_NEW_SACK);
4830 	}
4831 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
4832 		int off_to_dup, iii;
4833 		uint32_t *dupdata, dblock;
4834 
4835 		off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk);
4836 		if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= sack_length) {
4837 			dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
4838 			    sizeof(uint32_t), (uint8_t *) & dblock);
4839 			off_to_dup += sizeof(uint32_t);
4840 			if (dupdata) {
4841 				for (iii = 0; iii < num_dup; iii++) {
4842 					sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4843 					dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
4844 					    sizeof(uint32_t), (uint8_t *) & dblock);
4845 					if (dupdata == NULL)
4846 						break;
4847 					off_to_dup += sizeof(uint32_t);
4848 				}
4849 			}
4850 		} else {
4851 			SCTP_PRINTF("Size invalid offset to dups:%d number dups:%d sack_len:%d num gaps:%d\n",
4852 			    off_to_dup, num_dup, sack_length, num_seg);
4853 		}
4854 	}
4855 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4856 		/* reality check */
4857 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4858 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4859 			    sctpchunk_listhead);
4860 			send_s = tp1->rec.data.TSN_seq + 1;
4861 		} else {
4862 			send_s = asoc->sending_seq;
4863 		}
4864 		if (cum_ack == send_s ||
4865 		    compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
4866 #ifndef INVARIANTS
4867 			struct mbuf *oper;
4868 
4869 #endif
4870 #ifdef INVARIANTS
4871 	hopeless_peer:
4872 			panic("Impossible sack 1");
4873 #else
4874 
4875 
4876 			/*
4877 			 * no way, we have not even sent this TSN out yet.
4878 			 * Peer is hopelessly messed up with us.
4879 			 */
4880 	hopeless_peer:
4881 			*abort_now = 1;
4882 			/* XXX */
4883 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4884 			    0, M_DONTWAIT, 1, MT_DATA);
4885 			if (oper) {
4886 				struct sctp_paramhdr *ph;
4887 				uint32_t *ippp;
4888 
4889 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4890 				    sizeof(uint32_t);
4891 				ph = mtod(oper, struct sctp_paramhdr *);
4892 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4893 				ph->param_length = htons(SCTP_BUF_LEN(oper));
4894 				ippp = (uint32_t *) (ph + 1);
4895 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4896 			}
4897 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4898 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4899 			return;
4900 #endif
4901 		}
4902 	}
4903 	/**********************/
4904 	/* 1) check the range */
4905 	/**********************/
4906 	if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
4907 		/* acking something behind */
4908 		return;
4909 	}
4910 	sav_cum_ack = asoc->last_acked_seq;
4911 
4912 	/* update the Rwnd of the peer */
4913 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4914 	    TAILQ_EMPTY(&asoc->send_queue) &&
4915 	    (asoc->stream_queue_cnt == 0)
4916 	    ) {
4917 		/* nothing left on send/sent and strmq */
4918 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4919 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4920 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4921 		}
4922 		asoc->peers_rwnd = a_rwnd;
4923 		if (asoc->sent_queue_retran_cnt) {
4924 			asoc->sent_queue_retran_cnt = 0;
4925 		}
4926 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4927 			/* SWS sender side engages */
4928 			asoc->peers_rwnd = 0;
4929 		}
4930 		/* stop any timers */
4931 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4932 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4933 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4934 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4935 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4936 					SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
4937 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4938 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4939 				}
4940 			}
4941 			net->partial_bytes_acked = 0;
4942 			net->flight_size = 0;
4943 		}
4944 		asoc->total_flight = 0;
4945 		asoc->total_flight_count = 0;
4946 		return;
4947 	}
	/*
	 * We initialize net_ack and net_ack2 to 0. These are used to track
	 * two things: net_ack accumulates the total bytes acked, while
	 * net_ack2 accumulates the bytes acked that are unambiguous, i.e.
	 * were never retransmitted. We track both on a per destination
	 * address basis.
	 */
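	/*
	 * For example (hypothetical numbers): a freshly sent 1200-byte
	 * chunk that is cum-acked below adds 1200 to its destination's
	 * net_ack; because its snd_count is 1 (< 2) it also adds 1200 to
	 * net_ack2 and may yield an unambiguous RTT sample for that
	 * destination.
	 */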
4955 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4956 		net->prev_cwnd = net->cwnd;
4957 		net->net_ack = 0;
4958 		net->net_ack2 = 0;
4959 
4960 		/*
4961 		 * CMT: Reset CUC and Fast recovery algo variables before
4962 		 * SACK processing
4963 		 */
4964 		net->new_pseudo_cumack = 0;
4965 		net->will_exit_fast_recovery = 0;
4966 	}
4967 	/* process the new consecutive TSN first */
4968 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
4969 	while (tp1) {
4970 		if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
4971 		    MAX_TSN) ||
4972 		    last_tsn == tp1->rec.data.TSN_seq) {
4973 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4974 				/*
4975 				 * ECN Nonce: Add the nonce to the sender's
4976 				 * nonce sum
4977 				 */
4978 				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4979 				accum_moved = 1;
4980 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4981 					/*
4982 					 * If it is less than ACKED, it is
4983 					 * now no-longer in flight. Higher
4984 					 * values may occur during marking
4985 					 */
4986 					if ((tp1->whoTo->dest_state &
4987 					    SCTP_ADDR_UNCONFIRMED) &&
4988 					    (tp1->snd_count < 2)) {
						/*
						 * If there was no retran,
						 * the address is
						 * un-confirmed, and we sent
						 * there and are now
						 * sacked: it's confirmed,
						 * so mark it.
						 */
4997 						tp1->whoTo->dest_state &=
4998 						    ~SCTP_ADDR_UNCONFIRMED;
4999 					}
5000 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5001 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
5002 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
5003 							    tp1->whoTo->flight_size,
5004 							    tp1->book_size,
5005 							    (uintptr_t) tp1->whoTo,
5006 							    tp1->rec.data.TSN_seq);
5007 						}
5008 						sctp_flight_size_decrease(tp1);
5009 						sctp_total_flight_decrease(stcb, tp1);
5010 					}
5011 					tp1->whoTo->net_ack += tp1->send_size;
5012 
5013 					/* CMT SFR and DAC algos */
5014 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
5015 					tp1->whoTo->saw_newack = 1;
5016 
5017 					if (tp1->snd_count < 2) {
						/*
						 * True non-retransmitted
						 * chunk
						 */
5022 						tp1->whoTo->net_ack2 +=
5023 						    tp1->send_size;
5024 
5025 						/* update RTO too? */
5026 						if (tp1->do_rtt) {
5027 							tp1->whoTo->RTO =
5028 							    sctp_calculate_rto(stcb,
5029 							    asoc, tp1->whoTo,
5030 							    &tp1->sent_rcv_time,
5031 							    sctp_align_safe_nocopy);
5032 							tp1->do_rtt = 0;
5033 						}
5034 					}
5035 					/*
5036 					 * CMT: CUCv2 algorithm. From the
5037 					 * cumack'd TSNs, for each TSN being
5038 					 * acked for the first time, set the
5039 					 * following variables for the
5040 					 * corresp destination.
5041 					 * new_pseudo_cumack will trigger a
5042 					 * cwnd update.
5043 					 * find_(rtx_)pseudo_cumack will
5044 					 * trigger search for the next
5045 					 * expected (rtx-)pseudo-cumack.
5046 					 */
5047 					tp1->whoTo->new_pseudo_cumack = 1;
5048 					tp1->whoTo->find_pseudo_cumack = 1;
5049 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
5050 
5051 
5052 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
5053 						sctp_log_sack(asoc->last_acked_seq,
5054 						    cum_ack,
5055 						    tp1->rec.data.TSN_seq,
5056 						    0,
5057 						    0,
5058 						    SCTP_LOG_TSN_ACKED);
5059 					}
5060 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
5061 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
5062 					}
5063 				}
5064 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5065 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
5066 #ifdef SCTP_AUDITING_ENABLED
5067 					sctp_audit_log(0xB3,
5068 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
5069 #endif
5070 				}
5071 				if (tp1->rec.data.chunk_was_revoked) {
5072 					/* deflate the cwnd */
5073 					tp1->whoTo->cwnd -= tp1->book_size;
5074 					tp1->rec.data.chunk_was_revoked = 0;
5075 				}
5076 				tp1->sent = SCTP_DATAGRAM_ACKED;
5077 			}
5078 		} else {
5079 			break;
5080 		}
5081 		tp1 = TAILQ_NEXT(tp1, sctp_next);
5082 	}
5083 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
5084 	/* always set this up to cum-ack */
5085 	asoc->this_sack_highest_gap = last_tsn;
5086 
5087 	/* Move offset up to point to gaps/dups */
5088 	offset += sizeof(struct sctp_sack_chunk);
5089 	if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_sack_chunk)) > sack_length) {
5090 
5091 		/* skip corrupt segments */
5092 		goto skip_segments;
5093 	}
5094 	if (num_seg > 0) {
5095 
5096 		/*
5097 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
5098 		 * to be greater than the cumack. Also reset saw_newack to 0
5099 		 * for all dests.
5100 		 */
5101 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5102 			net->saw_newack = 0;
5103 			net->this_sack_highest_newack = last_tsn;
5104 		}
5105 
		/*
		 * this_sack_highest_gap will increase while handling NEW
		 * segments; this_sack_highest_newack will increase while
		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
		 * used for the CMT DAC algo. saw_newack will also change.
		 */
5112 		sctp_handle_segments(m, &offset, stcb, asoc, ch, last_tsn,
5113 		    &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
5114 		    num_seg, &ecn_seg_sums);
5115 
5116 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
5117 			/*
5118 			 * validate the biggest_tsn_acked in the gap acks if
5119 			 * strict adherence is wanted.
5120 			 */
5121 			if ((biggest_tsn_acked == send_s) ||
5122 			    (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
5123 				/*
5124 				 * peer is either confused or we are under
5125 				 * attack. We must abort.
5126 				 */
5127 				goto hopeless_peer;
5128 			}
5129 		}
5130 	}
5131 skip_segments:
5132 	/*******************************************/
5133 	/* cancel ALL T3-send timer if accum moved */
5134 	/*******************************************/
5135 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
5136 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5137 			if (net->new_pseudo_cumack)
5138 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5139 				    stcb, net,
5140 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
5141 
5142 		}
5143 	} else {
5144 		if (accum_moved) {
5145 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5146 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5147 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
5148 			}
5149 		}
5150 	}
5151 	/********************************************/
5152 	/* drop the acked chunks from the sendqueue */
5153 	/********************************************/
5154 	asoc->last_acked_seq = cum_ack;
5155 
5156 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
5157 	if (tp1 == NULL)
5158 		goto done_with_it;
5159 	do {
5160 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
5161 		    MAX_TSN)) {
5162 			break;
5163 		}
5164 		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
5165 			/* no more sent on list */
			SCTP_PRINTF("Warning, tp1->sent == %d and it is now acked?\n",
			    tp1->sent);
5168 		}
5169 		tp2 = TAILQ_NEXT(tp1, sctp_next);
5170 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
5171 		if (tp1->pr_sctp_on) {
5172 			if (asoc->pr_sctp_cnt != 0)
5173 				asoc->pr_sctp_cnt--;
5174 		}
5175 		if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
5176 		    (asoc->total_flight > 0)) {
5177 #ifdef INVARIANTS
			panic("Warning flight size is positive and should be 0");
5179 #else
			SCTP_PRINTF("Warning, flight size is %d but should be 0\n",
			    asoc->total_flight);
5182 #endif
5183 			asoc->total_flight = 0;
5184 		}
5185 		if (tp1->data) {
5186 			/* sa_ignore NO_NULL_CHK */
5187 			sctp_free_bufspace(stcb, asoc, tp1, 1);
5188 			sctp_m_freem(tp1->data);
5189 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
5190 				asoc->sent_queue_cnt_removeable--;
5191 			}
5192 		}
5193 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
5194 			sctp_log_sack(asoc->last_acked_seq,
5195 			    cum_ack,
5196 			    tp1->rec.data.TSN_seq,
5197 			    0,
5198 			    0,
5199 			    SCTP_LOG_FREE_SENT);
5200 		}
5201 		tp1->data = NULL;
5202 		asoc->sent_queue_cnt--;
5203 		sctp_free_a_chunk(stcb, tp1);
5204 		wake_him++;
5205 		tp1 = tp2;
5206 	} while (tp1 != NULL);
5207 
5208 done_with_it:
5209 	/* sa_ignore NO_NULL_CHK */
5210 	if ((wake_him) && (stcb->sctp_socket)) {
5211 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5212 		struct socket *so;
5213 
5214 #endif
5215 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
5216 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
5217 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
5218 		}
5219 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5220 		so = SCTP_INP_SO(stcb->sctp_ep);
5221 		atomic_add_int(&stcb->asoc.refcnt, 1);
5222 		SCTP_TCB_UNLOCK(stcb);
5223 		SCTP_SOCKET_LOCK(so, 1);
5224 		SCTP_TCB_LOCK(stcb);
5225 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
5226 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
5227 			/* assoc was freed while we were unlocked */
5228 			SCTP_SOCKET_UNLOCK(so, 1);
5229 			return;
5230 		}
5231 #endif
5232 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
5233 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5234 		SCTP_SOCKET_UNLOCK(so, 1);
5235 #endif
5236 	} else {
5237 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
5238 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
5239 		}
5240 	}
5241 
5242 	if (asoc->fast_retran_loss_recovery && accum_moved) {
5243 		if (compare_with_wrap(asoc->last_acked_seq,
5244 		    asoc->fast_recovery_tsn, MAX_TSN) ||
5245 		    asoc->last_acked_seq == asoc->fast_recovery_tsn) {
5246 			/* Setup so we will exit RFC2582 fast recovery */
5247 			will_exit_fast_recovery = 1;
5248 		}
5249 	}
	/*
	 * Check for revoked fragments:
	 *
	 * If the previous SACK carried no gap-ack blocks, then nothing can
	 * have been revoked. If it did carry gap-acks, then:
	 * - if this SACK also has gap-acks (num_seg > 0), call
	 *   sctp_check_for_revoked() to tell whether the peer revoked some
	 *   of them;
	 * - else the peer revoked all previously ACKED fragments, since we
	 *   had some before and now have NONE.
	 */
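	/*
	 * For example (hypothetical TSNs): if the previous SACK gap-acked
	 * TSNs 10-12 while cum-ack stayed at 9, and this SACK carries
	 * cum-ack 9 with no gap-acks at all, the peer has reneged on
	 * 10-12; below they are put back to SCTP_DATAGRAM_SENT and their
	 * bytes re-enter the flight size.
	 */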
5259 
5260 	if (num_seg)
5261 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
5262 	else if (asoc->saw_sack_with_frags) {
5263 		int cnt_revoked = 0;
5264 
5265 		tp1 = TAILQ_FIRST(&asoc->sent_queue);
5266 		if (tp1 != NULL) {
5267 			/* Peer revoked all dg's marked or acked */
5268 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5269 				if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
5270 				    (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
5271 					tp1->sent = SCTP_DATAGRAM_SENT;
5272 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
5273 						sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
5274 						    tp1->whoTo->flight_size,
5275 						    tp1->book_size,
5276 						    (uintptr_t) tp1->whoTo,
5277 						    tp1->rec.data.TSN_seq);
5278 					}
5279 					sctp_flight_size_increase(tp1);
5280 					sctp_total_flight_increase(stcb, tp1);
5281 					tp1->rec.data.chunk_was_revoked = 1;
5282 					/*
5283 					 * To ensure that this increase in
5284 					 * flightsize, which is artificial,
5285 					 * does not throttle the sender, we
5286 					 * also increase the cwnd
5287 					 * artificially.
5288 					 */
5289 					tp1->whoTo->cwnd += tp1->book_size;
5290 					cnt_revoked++;
5291 				}
5292 			}
5293 			if (cnt_revoked) {
5294 				reneged_all = 1;
5295 			}
5296 		}
5297 		asoc->saw_sack_with_frags = 0;
5298 	}
5299 	if (num_seg)
5300 		asoc->saw_sack_with_frags = 1;
5301 	else
5302 		asoc->saw_sack_with_frags = 0;
5303 
5304 	/* JRS - Use the congestion control given in the CC module */
5305 	asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
5306 
5307 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
5308 		/* nothing left in-flight */
5309 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5310 			/* stop all timers */
5311 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5312 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5313 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5314 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5315 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
5316 				}
5317 			}
5318 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5319 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
5320 			net->flight_size = 0;
5321 			net->partial_bytes_acked = 0;
5322 		}
5323 		asoc->total_flight = 0;
5324 		asoc->total_flight_count = 0;
5325 	}
5326 	/**********************************/
5327 	/* Now what about shutdown issues */
5328 	/**********************************/
5329 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5330 		/* nothing left on sendqueue.. consider done */
5331 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5332 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5333 			    asoc->peers_rwnd, 0, 0, a_rwnd);
5334 		}
5335 		asoc->peers_rwnd = a_rwnd;
5336 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5337 			/* SWS sender side engages */
5338 			asoc->peers_rwnd = 0;
5339 		}
5340 		/* clean up */
5341 		if ((asoc->stream_queue_cnt == 1) &&
5342 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5343 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5344 		    (asoc->locked_on_sending)
5345 		    ) {
5346 			struct sctp_stream_queue_pending *sp;
5347 
			/*
			 * We may be in a state where everything got across
			 * but we cannot write more due to a shutdown... we
			 * abort, since the user did not indicate EOR in
			 * this case.
			 */
5353 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
5354 			    sctp_streamhead);
5355 			if ((sp) && (sp->length == 0)) {
5356 				asoc->locked_on_sending = NULL;
5357 				if (sp->msg_is_complete) {
5358 					asoc->stream_queue_cnt--;
5359 				} else {
5360 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
5361 					asoc->stream_queue_cnt--;
5362 				}
5363 			}
5364 		}
5365 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5366 		    (asoc->stream_queue_cnt == 0)) {
5367 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5368 				/* Need to abort here */
5369 				struct mbuf *oper;
5370 
5371 		abort_out_now:
5372 				*abort_now = 1;
5373 				/* XXX */
5374 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
5375 				    0, M_DONTWAIT, 1, MT_DATA);
5376 				if (oper) {
5377 					struct sctp_paramhdr *ph;
5378 					uint32_t *ippp;
5379 
5380 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5381 					    sizeof(uint32_t);
5382 					ph = mtod(oper, struct sctp_paramhdr *);
5383 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
5384 					ph->param_length = htons(SCTP_BUF_LEN(oper));
5385 					ippp = (uint32_t *) (ph + 1);
5386 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
5387 				}
5388 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
5389 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
5390 				return;
5391 			} else {
5392 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5393 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5394 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5395 				}
5396 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5397 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5398 				sctp_stop_timers_for_shutdown(stcb);
5399 				sctp_send_shutdown(stcb,
5400 				    stcb->asoc.primary_destination);
5401 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5402 				    stcb->sctp_ep, stcb, asoc->primary_destination);
5403 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5404 				    stcb->sctp_ep, stcb, asoc->primary_destination);
5405 			}
5406 			return;
5407 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5408 		    (asoc->stream_queue_cnt == 0)) {
5409 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5410 				goto abort_out_now;
5411 			}
5412 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5413 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5414 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5415 			sctp_send_shutdown_ack(stcb,
5416 			    stcb->asoc.primary_destination);
5417 
5418 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5419 			    stcb->sctp_ep, stcb, asoc->primary_destination);
5420 			return;
5421 		}
5422 	}
5423 	/*
5424 	 * Now here we are going to recycle net_ack for a different use...
5425 	 * HEADS UP.
5426 	 */
5427 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5428 		net->net_ack = 0;
5429 	}
5430 
5431 	/*
5432 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5433 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5434 	 * automatically ensure that.
5435 	 */
5436 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
5437 		this_sack_lowest_newack = cum_ack;
5438 	}
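	/*
	 * For example: with the DAC flag clear, this_sack_lowest_newack ==
	 * cum_ack means nothing below the cum-ack can count as "newly
	 * acked" for the DAC-based strike marking done in
	 * sctp_strike_gap_ack_chunks() below.
	 */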
5439 	if (num_seg > 0) {
5440 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5441 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5442 	}
5443 	/* JRS - Use the congestion control given in the CC module */
5444 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5445 
5446 	/******************************************************************
5447 	 *  Here we do the stuff with ECN Nonce checking.
5448 	 *  We basically check to see if the nonce sum flag was incorrect
5449 	 *  or if resynchronization needs to be done. Also if we catch a
5450 	 *  misbehaving receiver we give him the kick.
5451 	 ******************************************************************/
5452 
5453 	if (asoc->ecn_nonce_allowed) {
5454 		if (asoc->nonce_sum_check) {
5455 			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
5456 				if (asoc->nonce_wait_for_ecne == 0) {
5457 					struct sctp_tmit_chunk *lchk;
5458 
5459 					lchk = TAILQ_FIRST(&asoc->send_queue);
5460 					asoc->nonce_wait_for_ecne = 1;
5461 					if (lchk) {
5462 						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
5463 					} else {
5464 						asoc->nonce_wait_tsn = asoc->sending_seq;
5465 					}
5466 				} else {
5467 					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
5468 					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
5469 						/*
5470 						 * Misbehaving peer. We need
5471 						 * to react to this guy
5472 						 */
5473 						asoc->ecn_allowed = 0;
5474 						asoc->ecn_nonce_allowed = 0;
5475 					}
5476 				}
5477 			}
5478 		} else {
5479 			/* See if Resynchronization Possible */
5480 			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
5481 				asoc->nonce_sum_check = 1;
				/*
				 * now we must calculate what the base is.
				 * We do this based on two things: we know
				 * the totals for all the segments gap-acked
				 * in the SACK (stored in ecn_seg_sums), and
				 * we know the SACK's nonce sum (it is in
				 * nonce_sum_flag). So we can build a truth
				 * table to back-calculate the new value of
				 * asoc->nonce_sum_expect_base:
				 *
				 * SACK-flag-Value   Seg-Sums   Base
				 *        0             0        0
				 *        1             0        1
				 *        0             1        1
				 *        1             1        0
				 */
5497 				asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
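				/*
				 * For example (hypothetical values): a SACK
				 * whose gap-acked segments' nonces sum to 1
				 * (ecn_seg_sums = 1) and whose
				 * nonce_sum_flag is 0 implies the cum-acked
				 * data summed to 1, so the new base is
				 * 1 ^ 0 = 1, matching the table above.
				 */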
5498 			}
5499 		}
5500 	}
5501 	/* Now are we exiting loss recovery ? */
5502 	if (will_exit_fast_recovery) {
5503 		/* Ok, we must exit fast recovery */
5504 		asoc->fast_retran_loss_recovery = 0;
5505 	}
5506 	if ((asoc->sat_t3_loss_recovery) &&
5507 	    ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
5508 	    MAX_TSN) ||
5509 	    (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
5510 		/* end satellite t3 loss recovery */
5511 		asoc->sat_t3_loss_recovery = 0;
5512 	}
5513 	/*
5514 	 * CMT Fast recovery
5515 	 */
5516 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5517 		if (net->will_exit_fast_recovery) {
5518 			/* Ok, we must exit fast recovery */
5519 			net->fast_retran_loss_recovery = 0;
5520 		}
5521 	}
5522 
5523 	/* Adjust and set the new rwnd value */
5524 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5525 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5526 		    asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5527 	}
5528 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5529 	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5530 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5531 		/* SWS sender side engages */
5532 		asoc->peers_rwnd = 0;
5533 	}
5534 	if (asoc->peers_rwnd > old_rwnd) {
5535 		win_probe_recovery = 1;
5536 	}
5537 	/*
5538 	 * Now we must setup so we have a timer up for anyone with
5539 	 * outstanding data.
5540 	 */
5541 	done_once = 0;
5542 again:
5543 	j = 0;
5544 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5545 		if (win_probe_recovery && (net->window_probe)) {
5546 			win_probe_recovered = 1;
5547 			/*-
5548 			 * Find first chunk that was used with
5549 			 * window probe and clear the event. Put
			 * it back into the send queue as if it
			 * has not been sent.
5552 			 */
5553 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5554 				if (tp1->window_probe) {
5555 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
5556 					break;
5557 				}
5558 			}
5559 		}
5560 		if (net->flight_size) {
5561 			j++;
5562 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5563 			    stcb->sctp_ep, stcb, net);
5566 		} else {
5567 			if (net->window_probe) {
5568 				/*
5569 				 * In window probes we must assure a timer
5570 				 * is still running there
5571 				 */
5572 
5573 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5574 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5575 					    stcb->sctp_ep, stcb, net);
5576 
5577 				}
5578 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5579 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5580 				    stcb, net,
5581 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5582 			}
5583 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5584 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5585 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5586 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5587 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
5588 				}
5589 			}
5590 		}
5591 	}
5592 	if ((j == 0) &&
5593 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5594 	    (asoc->sent_queue_retran_cnt == 0) &&
5595 	    (win_probe_recovered == 0) &&
5596 	    (done_once == 0)) {
5597 		/*
		 * huh, this should not happen unless all packets are
		 * PR-SCTP and marked to be skipped, of course.
5600 		 */
5601 		if (sctp_fs_audit(asoc)) {
5602 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5603 				net->flight_size = 0;
5604 			}
5605 			asoc->total_flight = 0;
5606 			asoc->total_flight_count = 0;
5607 			asoc->sent_queue_retran_cnt = 0;
5608 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5609 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5610 					sctp_flight_size_increase(tp1);
5611 					sctp_total_flight_increase(stcb, tp1);
5612 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5613 					asoc->sent_queue_retran_cnt++;
5614 				}
5615 			}
5616 		}
5617 		done_once = 1;
5618 		goto again;
5619 	}
5620 	/* Fix up the a-p-a-p for future PR-SCTP sends */
5621 	if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
5622 		asoc->advanced_peer_ack_point = cum_ack;
5623 	}
5624 	/* C2. try to further move advancedPeerAckPoint ahead */
5625 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5626 		struct sctp_tmit_chunk *lchk;
5627 		uint32_t old_adv_peer_ack_point;
5628 
5629 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5630 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5631 		/* C3. See if we need to send a Fwd-TSN */
5632 		if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
5633 		    MAX_TSN)) {
5634 			/*
5635 			 * ISSUE with ECN, see FWD-TSN processing for notes
5636 			 * on issues that will occur when the ECN NONCE
5637 			 * stuff is put into SCTP for cross checking.
5638 			 */
5639 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5640 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5641 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5642 				    old_adv_peer_ack_point);
5643 			}
5644 			if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
5645 			    MAX_TSN)) {
5646 				send_forward_tsn(stcb, asoc);
5647 				/*
5648 				 * ECN Nonce: Disable Nonce Sum check when
5649 				 * FWD TSN is sent and store resync tsn
5650 				 */
5651 				asoc->nonce_sum_check = 0;
5652 				asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
5653 			} else if (lchk) {
5654 				/* try to FR fwd-tsn's that get lost too */
5655 				lchk->rec.data.fwd_tsn_cnt++;
5656 				if (lchk->rec.data.fwd_tsn_cnt > 3) {
5657 					send_forward_tsn(stcb, asoc);
5658 					lchk->rec.data.fwd_tsn_cnt = 0;
5659 				}
5660 			}
5661 		}
5662 		if (lchk) {
5663 			/* Assure a timer is up */
5664 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5665 			    stcb->sctp_ep, stcb, lchk->whoTo);
5666 		}
5667 	}
5668 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5669 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5670 		    a_rwnd,
5671 		    stcb->asoc.peers_rwnd,
5672 		    stcb->asoc.total_flight,
5673 		    stcb->asoc.total_output_queue_size);
5674 	}
5675 }
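
/*
 * A minimal sketch (compiled out, names local to the sketch) of the
 * peers_rwnd bookkeeping performed at the end of sctp_handle_sack()
 * above: the advertised window is reduced by the bytes still in flight
 * plus a per-chunk overhead for everything still on the sent queue,
 * saturating at zero (the role sctp_sbspace_sub() plays above), and a
 * window smaller than the sender-side SWS threshold is treated as closed.
 */
#if 0
static uint32_t
sketch_rwnd_after_sack(uint32_t a_rwnd, uint32_t total_flight,
    uint32_t sent_queue_cnt, uint32_t chunk_oh, uint32_t sws_sender)
{
	uint32_t outstanding, rwnd;

	outstanding = total_flight + (sent_queue_cnt * chunk_oh);
	/* saturating subtraction: never advertise a negative window */
	rwnd = (a_rwnd > outstanding) ? (a_rwnd - outstanding) : 0;
	if (rwnd < sws_sender) {
		/* SWS avoidance: the sender side engages */
		rwnd = 0;
	}
	return (rwnd);
}
#endif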
5676 
5677 void
5678 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5679     struct sctp_nets *netp, int *abort_flag)
5680 {
5681 	/* Copy cum-ack */
5682 	uint32_t cum_ack, a_rwnd;
5683 
5684 	cum_ack = ntohl(cp->cumulative_tsn_ack);
	/*
	 * Arrange so a_rwnd does NOT change: the express SACK handler
	 * recomputes peers_rwnd by subtracting the bytes still in flight,
	 * so adding total_flight here compensates for that subtraction.
	 */
5686 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5687 
5688 	/* Now call the express sack handling */
5689 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
5690 }
5691 
5692 static void
5693 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5694     struct sctp_stream_in *strmin)
5695 {
5696 	struct sctp_queued_to_read *ctl, *nctl;
5697 	struct sctp_association *asoc;
5698 	int tt;
5699 
	/* EY - used to calculate nr_gap information */
5701 	uint32_t nr_tsn, nr_gap;
5702 
5703 	asoc = &stcb->asoc;
5704 	tt = strmin->last_sequence_delivered;
	/*
	 * First deliver anything prior to and including the stream
	 * sequence number that came in.
	 */
5709 	ctl = TAILQ_FIRST(&strmin->inqueue);
5710 	while (ctl) {
5711 		nctl = TAILQ_NEXT(ctl, next);
5712 		if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
5713 		    (tt == ctl->sinfo_ssn)) {
5714 			/* this is deliverable now */
5715 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5716 			/* subtract pending on streams */
5717 			asoc->size_on_all_streams -= ctl->length;
5718 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5719 			/* deliver it to at least the delivery-q */
5720 			if (stcb->sctp_socket) {
5721 				/* EY need the tsn info for calculating nr */
5722 				nr_tsn = ctl->sinfo_tsn;
5723 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5724 				    ctl,
5725 				    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
				/*
				 * EY: this is the chunk that should be
				 * tagged NR (non-renegable). Calculate the
				 * gap and then tag this TSN as NR in the
				 * nr_mapping_array.
				 */
5732 				if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
5733 
5734 					SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
5735 					if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
5736 					    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
						/*
						 * EY: these should never
						 * happen, as explained above
						 */
5741 					} else {
5742 						SCTP_TCB_LOCK_ASSERT(stcb);
5743 						SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
5744 						SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
5745 						if (compare_with_wrap(nr_tsn,
5746 						    asoc->highest_tsn_inside_nr_map,
5747 						    MAX_TSN))
5748 							asoc->highest_tsn_inside_nr_map = nr_tsn;
5749 					}
					if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, nr_gap)) {
						/*
						 * printf("In sctp_kick_prsctp_reorder_queue(7): "
						 *     "Something wrong, the TSN to be tagged "
						 *     "as NR is not even in the mapping_array, "
						 *     "or map and nr_map are inconsistent");
						 */
						/*
						 * EY - not 100% sure about the lock thing;
						 * don't think it's required:
						 * SCTP_TCB_LOCK_ASSERT(stcb);
						 */
						/*
						 * printf("\nCalculating an nr_gap!!\n"
						 *     "mapping_array_size = %d nr_mapping_array_size = %d\n"
						 *     "mapping_array_base = %d nr_mapping_array_base = %d\n"
						 *     "highest_tsn_inside_map = %d highest_tsn_inside_nr_map = %d\n"
						 *     "TSN = %d nr_gap = %d",
						 *     asoc->mapping_array_size, asoc->nr_mapping_array_size,
						 *     asoc->mapping_array_base_tsn, asoc->nr_mapping_array_base_tsn,
						 *     asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map,
						 *     tsn, nr_gap);
						 */
					}
5797 				}
5798 			}
5799 		} else {
5800 			/* no more delivery now. */
5801 			break;
5802 		}
5803 		ctl = nctl;
5804 	}
5805 	/*
5806 	 * now we must deliver things in queue the normal way  if any are
5807 	 * now ready.
5808 	 */
5809 	tt = strmin->last_sequence_delivered + 1;
5810 	ctl = TAILQ_FIRST(&strmin->inqueue);
5811 	while (ctl) {
5812 		nctl = TAILQ_NEXT(ctl, next);
5813 		if (tt == ctl->sinfo_ssn) {
5814 			/* this is deliverable now */
5815 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5816 			/* subtract pending on streams */
5817 			asoc->size_on_all_streams -= ctl->length;
5818 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5819 			/* deliver it to at least the delivery-q */
5820 			strmin->last_sequence_delivered = ctl->sinfo_ssn;
5821 			if (stcb->sctp_socket) {
5822 				/* EY */
5823 				nr_tsn = ctl->sinfo_tsn;
5824 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5825 				    ctl,
5826 				    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
				/*
				 * EY: this is the chunk that should be
				 * tagged NR (non-renegable). Calculate the
				 * gap and then tag this TSN as NR in the
				 * nr_mapping_array.
				 */
5833 				if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
5834 					SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
5835 					if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
5836 					    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
						/*
						 * EY: these should never
						 * happen, as explained above
						 */
5841 					} else {
5842 						SCTP_TCB_LOCK_ASSERT(stcb);
5843 						SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
5844 						SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
5845 						if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map,
5846 						    MAX_TSN))
5847 							asoc->highest_tsn_inside_nr_map = nr_tsn;
5848 					}
					if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, nr_gap)) {
						/*
						 * printf("In sctp_kick_prsctp_reorder_queue(8): "
						 *     "Something wrong, the TSN to be tagged "
						 *     "as NR is not even in the mapping_array, "
						 *     "or map and nr_map are inconsistent");
						 */
						/*
						 * EY - not 100% sure about the lock thing;
						 * don't think it's required:
						 * SCTP_TCB_LOCK_ASSERT(stcb);
						 */
						/*
						 * printf("\nCalculating an nr_gap!!\n"
						 *     "mapping_array_size = %d nr_mapping_array_size = %d\n"
						 *     "mapping_array_base = %d nr_mapping_array_base = %d\n"
						 *     "highest_tsn_inside_map = %d highest_tsn_inside_nr_map = %d\n"
						 *     "TSN = %d nr_gap = %d",
						 *     asoc->mapping_array_size, asoc->nr_mapping_array_size,
						 *     asoc->mapping_array_base_tsn, asoc->nr_mapping_array_base_tsn,
						 *     asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map,
						 *     tsn, nr_gap);
						 */
					}
5896 				}
5897 			}
5898 			tt = strmin->last_sequence_delivered + 1;
5899 		} else {
5900 			break;
5901 		}
5902 		ctl = nctl;
5903 	}
5904 }
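
/*
 * For example (hypothetical SSNs): if a FWD-TSN bumps
 * last_sequence_delivered to 5, the first loop above releases any queued
 * SSNs <= 5 regardless of gaps, and the second loop then resumes normal
 * in-order delivery at SSN 6 for as long as the queue stays consecutive.
 */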
5905 
5906 static void
5907 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5908     struct sctp_association *asoc,
5909     uint16_t stream, uint16_t seq)
5910 {
5911 	struct sctp_tmit_chunk *chk, *at;
5912 
5913 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5914 		/* For each one on here see if we need to toss it */
		/*
		 * For now, large messages held on the reasmqueue that are
		 * complete will be tossed too. We could in theory do more
		 * work to spin through and stop after dumping one msg (i.e.
		 * upon seeing the start of a new msg at the head), and call
		 * the delivery function to see if it can be delivered. But
		 * for now we just dump everything on the queue.
		 */
5923 		chk = TAILQ_FIRST(&asoc->reasmqueue);
5924 		while (chk) {
5925 			at = TAILQ_NEXT(chk, sctp_next);
5926 			if (chk->rec.data.stream_number != stream) {
5927 				chk = at;
5928 				continue;
5929 			}
5930 			if (chk->rec.data.stream_seq == seq) {
5931 				/* It needs to be tossed */
5932 				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5933 				if (compare_with_wrap(chk->rec.data.TSN_seq,
5934 				    asoc->tsn_last_delivered, MAX_TSN)) {
5935 					asoc->tsn_last_delivered =
5936 					    chk->rec.data.TSN_seq;
5937 					asoc->str_of_pdapi =
5938 					    chk->rec.data.stream_number;
5939 					asoc->ssn_of_pdapi =
5940 					    chk->rec.data.stream_seq;
5941 					asoc->fragment_flags =
5942 					    chk->rec.data.rcv_flags;
5943 				}
5944 				asoc->size_on_reasm_queue -= chk->send_size;
5945 				sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5946 
5947 				/* Clear up any stream problem */
5948 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5949 				    SCTP_DATA_UNORDERED &&
5950 				    (compare_with_wrap(chk->rec.data.stream_seq,
5951 				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
5952 				    MAX_SEQ))) {
					/*
					 * We must dump forward this stream's
					 * sequence number if the skipped
					 * chunk is not unordered. There is
					 * a chance that if the peer does
					 * not include the last fragment in
					 * its FWD-TSN we WILL have a
					 * problem here, since you would
					 * have a partial chunk in queue
					 * that may not be deliverable. Also,
					 * if a partial delivery API has
					 * started, the user may get a
					 * partial chunk with the next read
					 * returning a new chunk... really
					 * ugly, but I see no way around it!
					 * Maybe a notify??
					 */
5970 					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5971 					    chk->rec.data.stream_seq;
5972 				}
5973 				if (chk->data) {
5974 					sctp_m_freem(chk->data);
5975 					chk->data = NULL;
5976 				}
5977 				sctp_free_a_chunk(stcb, chk);
5978 			} else if (compare_with_wrap(chk->rec.data.stream_seq, seq, MAX_SEQ)) {
5979 				/*
5980 				 * If the stream_seq is > than the purging
5981 				 * one, we are done
5982 				 */
5983 				break;
5984 			}
5985 			chk = at;
5986 		}
5987 	}
5988 }
5989 
5990 
5991 void
5992 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5993     struct sctp_forward_tsn_chunk *fwd, int *abort_flag, struct mbuf *m, int offset)
5994 {
	/*
	 * ISSUES that MUST be fixed for ECN! When we are the sender of the
	 * forward TSN, and the SACK comes back that acknowledges the
	 * FWD-TSN, we must reset the NONCE sum to match correctly. This
	 * will get quite tricky since we may have sent more data
	 * intervening and must carefully account for what the SACK says on
	 * the nonce and any gaps that are reported. This work will NOT be
	 * done here, but I note it here since it is really related to
	 * PR-SCTP and FWD-TSNs.
	 */
6005 
6006 	/* The pr-sctp fwd tsn */
	/*
	 * Here we will perform all the data receiver side steps for
	 * processing FwdTSN, as required by the pr-sctp draft:
	 *
	 * Assume we get FwdTSN(x):
	 *
	 * 1) update local cumTSN to x
	 * 2) try to further advance cumTSN to x + others we have
	 * 3) examine and update re-ordering queue on pr-in-streams
	 * 4) clean up re-assembly queue
	 * 5) Send a sack to report where we are.
	 */
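	/*
	 * For example (hypothetical TSNs): with local cumTSN 10, a
	 * FwdTSN(13) marks 11-13 as received in the mapping array, purges
	 * the re-ordering and re-assembly queues up to the per-stream
	 * sequence numbers listed in the chunk, and the next SACK then
	 * reports cum-ack 13 (or higher, if 14, 15, ... were already in
	 * the map).
	 */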
6018 	struct sctp_association *asoc;
6019 	uint32_t new_cum_tsn, gap;
6020 	unsigned int i, fwd_sz, cumack_set_flag, m_size;
6021 	uint32_t str_seq;
6022 	struct sctp_stream_in *strm;
6023 	struct sctp_tmit_chunk *chk, *at;
6024 	struct sctp_queued_to_read *ctl, *sv;
6025 
6026 	cumack_set_flag = 0;
6027 	asoc = &stcb->asoc;
6028 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
6029 		SCTPDBG(SCTP_DEBUG_INDATA1,
		    "Bad size, too small fwd-tsn\n");
6031 		return;
6032 	}
6033 	m_size = (stcb->asoc.mapping_array_size << 3);
6034 	/*************************************************************/
6035 	/* 1. Here we update local cumTSN and shift the bitmap array */
6036 	/*************************************************************/
6037 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
6038 
6039 	if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
6040 	    asoc->cumulative_tsn == new_cum_tsn) {
6041 		/* Already got there ... */
6042 		return;
6043 	}
6044 	if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
6045 	    MAX_TSN)) {
6046 		asoc->highest_tsn_inside_map = new_cum_tsn;
6047 		/* EY nr_mapping_array version of the above */
6048 		/*
6049 		 * if(SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
6050 		 * asoc->peer_supports_nr_sack)
6051 		 */
6052 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
6053 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
6054 			sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
6055 		}
6056 	}
6057 	/*
6058 	 * now we know the new TSN is more advanced, let's find the actual
6059 	 * gap
6060 	 */
6061 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
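	/*
	 * A worked example of the wrap case (hypothetical values): with
	 * mapping_array_base_tsn = 0xfffffffe and new_cum_tsn = 0x1, the
	 * TSN has wrapped past MAX_TSN, so the macro computes
	 * gap = (MAX_TSN - 0xfffffffe) + 0x1 + 1 = 3; i.e. the new cum-TSN
	 * sits three slots above the base of the mapping array.
	 */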
6062 	if (gap >= m_size) {
6063 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
6064 			sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
6065 		}
6066 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
6067 			struct mbuf *oper;
6068 
6069 			/*
6070 			 * out of range (of single byte chunks in the rwnd I
6071 			 * give out). This must be an attacker.
6072 			 */
6073 			*abort_flag = 1;
6074 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
6075 			    0, M_DONTWAIT, 1, MT_DATA);
6076 			if (oper) {
6077 				struct sctp_paramhdr *ph;
6078 				uint32_t *ippp;
6079 
6080 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
6081 				    (sizeof(uint32_t) * 3);
6082 				ph = mtod(oper, struct sctp_paramhdr *);
6083 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
6084 				ph->param_length = htons(SCTP_BUF_LEN(oper));
6085 				ippp = (uint32_t *) (ph + 1);
6086 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
6087 				ippp++;
6088 				*ippp = asoc->highest_tsn_inside_map;
6089 				ippp++;
6090 				*ippp = new_cum_tsn;
6091 			}
6092 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
6093 			sctp_abort_an_association(stcb->sctp_ep, stcb,
6094 			    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
6095 			return;
6096 		}
6097 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
6098 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
6099 		cumack_set_flag = 1;
6100 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
6101 		asoc->cumulative_tsn = asoc->highest_tsn_inside_map = new_cum_tsn;
6102 		/* EY - nr_sack: nr_mapping_array version of the above */
6103 		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
6104 			memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.nr_mapping_array_size);
6105 			asoc->nr_mapping_array_base_tsn = new_cum_tsn + 1;
6106 			asoc->highest_tsn_inside_nr_map = new_cum_tsn;
			if (asoc->nr_mapping_array_size != asoc->mapping_array_size) {
				/*
				 * printf("In sctp_handle_forward_tsn: "
				 *     "Something is wrong, the sizes of "
				 *     "map and nr_map should be equal!");
				 */
			}
6114 		}
6115 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
6116 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
6117 		}
6118 		asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
6119 	} else {
6120 		SCTP_TCB_LOCK_ASSERT(stcb);
6121 		for (i = 0; i <= gap; i++) {
6122 			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack
6123 			    && SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
6124 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
6125 			} else {
6126 				SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
6127 			}
6128 		}
6129 		/*
6130 		 * Now after marking all, slide thing forward but no sack
6131 		 * please.
6132 		 */
6133 		sctp_sack_check(stcb, 0, 0, abort_flag);
6134 		if (*abort_flag)
6135 			return;
6136 	}
6137 	/*************************************************************/
6138 	/* 2. Clear up re-assembly queue                             */
6139 	/*************************************************************/
6140 	/*
6141 	 * First service it if pd-api is up, just in case we can progress it
6142 	 * forward
6143 	 */
6144 	if (asoc->fragmented_delivery_inprogress) {
6145 		sctp_service_reassembly(stcb, asoc);
6146 	}
6147 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
6148 		/* For each one on here see if we need to toss it */
		/*
		 * For now, large messages held on the reasmqueue that are
		 * complete will be tossed too. We could in theory do more
		 * work to spin through and stop after dumping one msg (i.e.
		 * upon seeing the start of a new msg at the head), and call
		 * the delivery function to see if it can be delivered. But
		 * for now we just dump everything on the queue.
		 */
6157 		chk = TAILQ_FIRST(&asoc->reasmqueue);
6158 		while (chk) {
6159 			at = TAILQ_NEXT(chk, sctp_next);
6160 			if ((compare_with_wrap(new_cum_tsn,
6161 			    chk->rec.data.TSN_seq, MAX_TSN)) ||
6162 			    (new_cum_tsn == chk->rec.data.TSN_seq)) {
6163 				/* It needs to be tossed */
6164 				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
6165 				if (compare_with_wrap(chk->rec.data.TSN_seq,
6166 				    asoc->tsn_last_delivered, MAX_TSN)) {
6167 					asoc->tsn_last_delivered =
6168 					    chk->rec.data.TSN_seq;
6169 					asoc->str_of_pdapi =
6170 					    chk->rec.data.stream_number;
6171 					asoc->ssn_of_pdapi =
6172 					    chk->rec.data.stream_seq;
6173 					asoc->fragment_flags =
6174 					    chk->rec.data.rcv_flags;
6175 				}
6176 				asoc->size_on_reasm_queue -= chk->send_size;
6177 				sctp_ucount_decr(asoc->cnt_on_reasm_queue);
6178 
6179 				/* Clear up any stream problem */
6180 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
6181 				    SCTP_DATA_UNORDERED &&
6182 				    (compare_with_wrap(chk->rec.data.stream_seq,
6183 				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
6184 				    MAX_SEQ))) {
					/*
					 * We must dump forward this stream's
					 * sequence number if the skipped
					 * chunk is not unordered. There is
					 * a chance that if the peer does
					 * not include the last fragment in
					 * its FWD-TSN we WILL have a
					 * problem here, since you would
					 * have a partial chunk in queue
					 * that may not be deliverable. Also,
					 * if a partial delivery API has
					 * started, the user may get a
					 * partial chunk with the next read
					 * returning a new chunk... really
					 * ugly, but I see no way around it!
					 * Maybe a notify??
					 */
6202 					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
6203 					    chk->rec.data.stream_seq;
6204 				}
6205 				if (chk->data) {
6206 					sctp_m_freem(chk->data);
6207 					chk->data = NULL;
6208 				}
6209 				sctp_free_a_chunk(stcb, chk);
6210 			} else {
6211 				/*
6212 				 * Ok we have gone beyond the end of the
6213 				 * fwd-tsn's mark.
6214 				 */
6215 				break;
6216 			}
6217 			chk = at;
6218 		}
6219 	}
6220 	/*******************************************************/
6221 	/* 3. Update the PR-stream re-ordering queues and fix  */
6222 	/* delivery issues as needed.                       */
6223 	/*******************************************************/
6224 	fwd_sz -= sizeof(*fwd);
6225 	if (m && fwd_sz) {
6226 		/* New method. */
6227 		unsigned int num_str;
6228 		struct sctp_strseq *stseq, strseqbuf;
6229 
6230 		offset += sizeof(*fwd);
6231 
6232 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
6233 		num_str = fwd_sz / sizeof(struct sctp_strseq);
6234 		for (i = 0; i < num_str; i++) {
6235 			uint16_t st;
6236 
6237 			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
6238 			    sizeof(struct sctp_strseq),
6239 			    (uint8_t *) & strseqbuf);
6240 			offset += sizeof(struct sctp_strseq);
6241 			if (stseq == NULL) {
6242 				break;
6243 			}
6244 			/* Convert */
6245 			st = ntohs(stseq->stream);
6246 			stseq->stream = st;
6247 			st = ntohs(stseq->sequence);
6248 			stseq->sequence = st;
6249 
6250 			/* now process */
6251 
6252 			/*
6253 			 * Ok we now look for the stream/seq on the read
			 * queue where it's not all delivered. If we find it,
6255 			 * we transmute the read entry into a PDI_ABORTED.
6256 			 */
6257 			if (stseq->stream >= asoc->streamincnt) {
6258 				/* screwed up streams, stop!  */
6259 				break;
6260 			}
6261 			if ((asoc->str_of_pdapi == stseq->stream) &&
6262 			    (asoc->ssn_of_pdapi == stseq->sequence)) {
6263 				/*
6264 				 * If this is the one we were partially
6265 				 * delivering now then we no longer are.
6266 				 * Note this will change with the reassembly
6267 				 * re-write.
6268 				 */
6269 				asoc->fragmented_delivery_inprogress = 0;
6270 			}
6271 			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
6272 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
6273 				if ((ctl->sinfo_stream == stseq->stream) &&
6274 				    (ctl->sinfo_ssn == stseq->sequence)) {
6275 					str_seq = (stseq->stream << 16) | stseq->sequence;
6276 					ctl->end_added = 1;
6277 					ctl->pdapi_aborted = 1;
6278 					sv = stcb->asoc.control_pdapi;
6279 					stcb->asoc.control_pdapi = ctl;
6280 					sctp_notify_partial_delivery_indication(stcb,
6281 					    SCTP_PARTIAL_DELIVERY_ABORTED,
6282 					    SCTP_HOLDS_LOCK,
6283 					    str_seq);
6284 					stcb->asoc.control_pdapi = sv;
6285 					break;
6286 				} else if ((ctl->sinfo_stream == stseq->stream) &&
6287 				    (compare_with_wrap(ctl->sinfo_ssn, stseq->sequence, MAX_SEQ))) {
6288 					/* We are past our victim SSN */
6289 					break;
6290 				}
6291 			}
6292 			strm = &asoc->strmin[stseq->stream];
6293 			if (compare_with_wrap(stseq->sequence,
6294 			    strm->last_sequence_delivered, MAX_SEQ)) {
6295 				/* Update the sequence number */
6296 				strm->last_sequence_delivered =
6297 				    stseq->sequence;
6298 			}
6299 			/* now kick the stream the new way */
6300 			/* sa_ignore NO_NULL_CHK */
6301 			sctp_kick_prsctp_reorder_queue(stcb, strm);
6302 		}
6303 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
6304 	}
6305 	if (TAILQ_FIRST(&asoc->reasmqueue)) {
6306 		/* now lets kick out and check for more fragmented delivery */
6307 		/* sa_ignore NO_NULL_CHK */
6308 		sctp_deliver_reasm_check(stcb, &stcb->asoc);
6309 	}
6310 }
6311 
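/*
 * The SACK paths below lean heavily on compare_with_wrap(a, b, MAX_TSN)
 * to order 32-bit TSNs that wrap around.  A minimal sketch of the
 * serial-number comparison involved (illustration only; the in-tree
 * compare_with_wrap() is defined elsewhere in the stack and this local
 * helper is not used by the code in this file):
 */
static __inline int
sctp_tsn_gt_sketch(uint32_t a, uint32_t b)
{
	/*
	 * a is "newer" than b when the forward distance from b to a,
	 * taken modulo 2^32, is less than half the sequence space.
	 * E.g. sctp_tsn_gt_sketch(1, 0xfffffffeU) is true: TSN 1 lies
	 * three steps past 0xfffffffe after a wrap.
	 */
	return ((a != b) && ((uint32_t)(a - b) < 0x80000000U));
}
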
6312 /* EY: fully identical to sctp_express_handle_sack, duplicated only for naming-convention reasons */
6313 void
6314 sctp_express_handle_nr_sack(struct sctp_tcb *stcb, uint32_t cumack,
6315     uint32_t rwnd, int nonce_sum_flag, int *abort_now)
6316 {
6317 	struct sctp_nets *net;
6318 	struct sctp_association *asoc;
6319 	struct sctp_tmit_chunk *tp1, *tp2;
6320 	uint32_t old_rwnd;
6321 	int win_probe_recovery = 0;
6322 	int win_probe_recovered = 0;
6323 	int j, done_once = 0;
6324 
6325 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
6326 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
6327 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
6328 	}
6329 	SCTP_TCB_LOCK_ASSERT(stcb);
6330 #ifdef SCTP_ASOCLOG_OF_TSNS
6331 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
6332 	stcb->asoc.cumack_log_at++;
6333 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
6334 		stcb->asoc.cumack_log_at = 0;
6335 	}
6336 #endif
6337 	asoc = &stcb->asoc;
6338 	old_rwnd = asoc->peers_rwnd;
6339 	if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
6340 		/* old ack */
6341 		return;
6342 	} else if (asoc->last_acked_seq == cumack) {
6343 		/* Window update sack */
6344 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
6345 		    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
6346 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6347 			/* SWS sender side engages */
6348 			asoc->peers_rwnd = 0;
6349 		}
6350 		if (asoc->peers_rwnd > old_rwnd) {
6351 			goto again;
6352 		}
6353 		return;
6354 	}
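	/*
	 * A window-update SACK (cum-ack unchanged) only refreshes the
	 * usable peer rwnd: the advertised rwnd minus everything still
	 * charged to the peer, i.e. bytes in flight plus a per-chunk
	 * overhead allowance for each chunk on the sent queue.  A hedged
	 * sketch of that bookkeeping, assuming sctp_sbspace_sub() is a
	 * subtract-clamping-at-zero helper (illustration only):
	 *
	 *	uint32_t
	 *	peer_rwnd_sketch(uint32_t advertised, uint32_t flight,
	 *	    uint32_t sent_cnt, uint32_t chunk_oh, uint32_t sws)
	 *	{
	 *		uint32_t charged, rwnd;
	 *
	 *		charged = flight + sent_cnt * chunk_oh;
	 *		rwnd = (advertised > charged) ?
	 *		    (advertised - charged) : 0;
	 *		if (rwnd < sws)
	 *			rwnd = 0;	(SWS sender-side clamp)
	 *		return (rwnd);
	 *	}
	 */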
6355 	/* First setup for CC stuff */
6356 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6357 		net->prev_cwnd = net->cwnd;
6358 		net->net_ack = 0;
6359 		net->net_ack2 = 0;
6360 
6361 		/*
6362 		 * CMT: Reset CUC and Fast recovery algo variables before
6363 		 * SACK processing
6364 		 */
6365 		net->new_pseudo_cumack = 0;
6366 		net->will_exit_fast_recovery = 0;
6367 	}
6368 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
6369 		uint32_t send_s;
6370 
6371 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
6372 			tp1 = TAILQ_LAST(&asoc->sent_queue,
6373 			    sctpchunk_listhead);
6374 			send_s = tp1->rec.data.TSN_seq + 1;
6375 		} else {
6376 			send_s = asoc->sending_seq;
6377 		}
6378 		if ((cumack == send_s) ||
6379 		    compare_with_wrap(cumack, send_s, MAX_TSN)) {
6380 #ifndef INVARIANTS
6381 			struct mbuf *oper;
6382 
6383 #endif
6384 #ifdef INVARIANTS
6385 			panic("Impossible sack 1");
6386 #else
6387 			*abort_now = 1;
6388 			/* XXX */
6389 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
6390 			    0, M_DONTWAIT, 1, MT_DATA);
6391 			if (oper) {
6392 				struct sctp_paramhdr *ph;
6393 				uint32_t *ippp;
6394 
6395 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
6396 				    sizeof(uint32_t);
6397 				ph = mtod(oper, struct sctp_paramhdr *);
6398 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
6399 				ph->param_length = htons(SCTP_BUF_LEN(oper));
6400 				ippp = (uint32_t *) (ph + 1);
6401 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
6402 			}
6403 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
6404 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
6405 			return;
6406 #endif
6407 		}
6408 	}
6409 	asoc->this_sack_highest_gap = cumack;
6410 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
6411 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
6412 		    stcb->asoc.overall_error_count,
6413 		    0,
6414 		    SCTP_FROM_SCTP_INDATA,
6415 		    __LINE__);
6416 	}
6417 	stcb->asoc.overall_error_count = 0;
6418 	if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
6419 		/* process the new consecutive TSN first */
6420 		tp1 = TAILQ_FIRST(&asoc->sent_queue);
6421 		while (tp1) {
6422 			tp2 = TAILQ_NEXT(tp1, sctp_next);
6423 			if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
6424 			    MAX_TSN) ||
6425 			    cumack == tp1->rec.data.TSN_seq) {
6426 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
6427 					printf("Warning, an unsent is now acked?\n");
6428 				}
6429 				/*
6430 				 * ECN Nonce: Add the nonce to the sender's
6431 				 * nonce sum
6432 				 */
6433 				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
6434 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
6435 					/*
6436 					 * If it is less than ACKED, it is
6437 					 * now no longer in flight. Higher
6438 					 * values may occur during marking
6439 					 */
6440 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
6441 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
6442 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
6443 							    tp1->whoTo->flight_size,
6444 							    tp1->book_size,
6445 							    (uintptr_t) tp1->whoTo,
6446 							    tp1->rec.data.TSN_seq);
6447 						}
6448 						sctp_flight_size_decrease(tp1);
6449 						/* sa_ignore NO_NULL_CHK */
6450 						sctp_total_flight_decrease(stcb, tp1);
6451 					}
6452 					tp1->whoTo->net_ack += tp1->send_size;
6453 					if (tp1->snd_count < 2) {
6454 						/*
6455 						 * True non-retransmitted
6456 						 * chunk
6457 						 */
6458 						tp1->whoTo->net_ack2 +=
6459 						    tp1->send_size;
6460 
6461 						/* update RTO too? */
6462 						if (tp1->do_rtt) {
6463 							tp1->whoTo->RTO =
6464 							/*
6465 							 * sa_ignore
6466 							 * NO_NULL_CHK
6467 							 */
6468 							    sctp_calculate_rto(stcb,
6469 							    asoc, tp1->whoTo,
6470 							    &tp1->sent_rcv_time,
6471 							    sctp_align_safe_nocopy);
6472 							tp1->do_rtt = 0;
6473 						}
6474 					}
6475 					/*
6476 					 * CMT: CUCv2 algorithm. From the
6477 					 * cumack'd TSNs, for each TSN being
6478 					 * acked for the first time, set the
6479 					 * following variables for the
6480 					 * corresp destination.
6481 					 * new_pseudo_cumack will trigger a
6482 					 * cwnd update.
6483 					 * find_(rtx_)pseudo_cumack will
6484 					 * trigger search for the next
6485 					 * expected (rtx-)pseudo-cumack.
6486 					 */
6487 					tp1->whoTo->new_pseudo_cumack = 1;
6488 					tp1->whoTo->find_pseudo_cumack = 1;
6489 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
6490 
6491 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
6492 						/* sa_ignore NO_NULL_CHK */
6493 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
6494 					}
6495 				}
6496 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
6497 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
6498 				}
6499 				if (tp1->rec.data.chunk_was_revoked) {
6500 					/* deflate the cwnd */
6501 					tp1->whoTo->cwnd -= tp1->book_size;
6502 					tp1->rec.data.chunk_was_revoked = 0;
6503 				}
6504 				tp1->sent = SCTP_DATAGRAM_ACKED;
6505 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
6506 				if (tp1->data) {
6507 					/* sa_ignore NO_NULL_CHK */
6508 					sctp_free_bufspace(stcb, asoc, tp1, 1);
6509 					sctp_m_freem(tp1->data);
6510 				}
6511 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
6512 					sctp_log_sack(asoc->last_acked_seq,
6513 					    cumack,
6514 					    tp1->rec.data.TSN_seq,
6515 					    0,
6516 					    0,
6517 					    SCTP_LOG_FREE_SENT);
6518 				}
6519 				tp1->data = NULL;
6520 				asoc->sent_queue_cnt--;
6521 				sctp_free_a_chunk(stcb, tp1);
6522 				tp1 = tp2;
6523 			} else {
6524 				break;
6525 			}
6526 		}
6527 
6528 	}
6529 	/* sa_ignore NO_NULL_CHK */
6530 	if (stcb->sctp_socket) {
6531 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
6532 		struct socket *so;
6533 
6534 #endif
6535 
6536 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
6537 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
6538 			/* sa_ignore NO_NULL_CHK */
6539 			sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
6540 		}
6541 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
6542 		so = SCTP_INP_SO(stcb->sctp_ep);
6543 		atomic_add_int(&stcb->asoc.refcnt, 1);
6544 		SCTP_TCB_UNLOCK(stcb);
6545 		SCTP_SOCKET_LOCK(so, 1);
6546 		SCTP_TCB_LOCK(stcb);
6547 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
6548 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
6549 			/* assoc was freed while we were unlocked */
6550 			SCTP_SOCKET_UNLOCK(so, 1);
6551 			return;
6552 		}
6553 #endif
6554 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
6555 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
6556 		SCTP_SOCKET_UNLOCK(so, 1);
6557 #endif
6558 	} else {
6559 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
6560 			sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
6561 		}
6562 	}
6563 
6564 	/* JRS - Use the congestion control given in the CC module */
6565 	if (asoc->last_acked_seq != cumack)
6566 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
6567 
6568 	asoc->last_acked_seq = cumack;
6569 
6570 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
6571 		/* nothing left in-flight */
6572 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6573 			net->flight_size = 0;
6574 			net->partial_bytes_acked = 0;
6575 		}
6576 		asoc->total_flight = 0;
6577 		asoc->total_flight_count = 0;
6578 	}
6579 	/* Fix up the a-p-a-p for future PR-SCTP sends */
6580 	if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
6581 		asoc->advanced_peer_ack_point = cumack;
6582 	}
6583 	/* ECN Nonce updates */
6584 	if (asoc->ecn_nonce_allowed) {
6585 		if (asoc->nonce_sum_check) {
6586 			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
6587 				if (asoc->nonce_wait_for_ecne == 0) {
6588 					struct sctp_tmit_chunk *lchk;
6589 
6590 					lchk = TAILQ_FIRST(&asoc->send_queue);
6591 					asoc->nonce_wait_for_ecne = 1;
6592 					if (lchk) {
6593 						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
6594 					} else {
6595 						asoc->nonce_wait_tsn = asoc->sending_seq;
6596 					}
6597 				} else {
6598 					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
6599 					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
6600 						/*
6601 						 * Misbehaving peer. We need
6602 						 * to react to this guy
6603 						 */
6604 						asoc->ecn_allowed = 0;
6605 						asoc->ecn_nonce_allowed = 0;
6606 					}
6607 				}
6608 			}
6609 		} else {
6610 			/* See if Resynchronization Possible */
6611 			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
6612 				asoc->nonce_sum_check = 1;
6613 				/*
6614 				 * Now we must back-calculate the base. We know
6615 				 * the totals for all segments gap-acked in the
6616 				 * SACK (none), and we know the SACK's nonce
6617 				 * sum, in nonce_sum_flag. The truth table for
6618 				 * the new asoc->nonce_sum_expect_base:
6619 				 *
6620 				 * SACK-flag  Seg-Sums  Base
6621 				 *    0          0       0
6622 				 *    1          0       1
6623 				 *    0          1       1
6624 				 *    1          1       0
6625 				 */
6626 				asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
6627 			}
6628 		}
6629 	}
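	/*
	 * The resync above works because the expected nonce sum is just a
	 * one-bit XOR chain: base = (XOR of the nonces of all gap-acked
	 * segments) ^ (SACK nonce-sum flag).  With no gap-acked segments
	 * the segment sum is 0, hence the (0 ^ nonce_sum_flag) in the
	 * code.  A one-bit sketch of the back-calculation (illustration
	 * only, assuming SCTP_SACK_NONCE_SUM masks the low bit):
	 *
	 *	uint8_t
	 *	nonce_base_sketch(uint8_t seg_sums, uint8_t sack_flag)
	 *	{
	 *		return ((seg_sums ^ sack_flag) & 0x01);
	 *	}
	 *
	 * Feeding the truth table above through this yields exactly the
	 * four rows listed there.
	 */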
6630 	/* RWND update */
6631 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
6632 	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
6633 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6634 		/* SWS sender side engages */
6635 		asoc->peers_rwnd = 0;
6636 	}
6637 	if (asoc->peers_rwnd > old_rwnd) {
6638 		win_probe_recovery = 1;
6639 	}
6640 	/* Now assure a timer where data is queued at */
6641 again:
6642 	j = 0;
6643 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6644 		int to_ticks;
6645 
6646 		if (win_probe_recovery && (net->window_probe)) {
6647 			win_probe_recovered = 1;
6648 			/*
6649 			 * Find first chunk that was used with window probe
6650 			 * and clear the sent
6651 			 */
6652 			/* sa_ignore FREED_MEMORY */
6653 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
6654 				if (tp1->window_probe) {
6655 					/* move back to data send queue */
6656 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
6657 					break;
6658 				}
6659 			}
6660 		}
6661 		if (net->RTO == 0) {
6662 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
6663 		} else {
6664 			to_ticks = MSEC_TO_TICKS(net->RTO);
6665 		}
6666 		if (net->flight_size) {
6667 
6668 			j++;
6669 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
6670 			    sctp_timeout_handler, &net->rxt_timer);
6671 			if (net->window_probe) {
6672 				net->window_probe = 0;
6673 			}
6674 		} else {
6675 			if (net->window_probe) {
6676 				/*
6677 				 * In window probes we must assure a timer
6678 				 * is still running there
6679 				 */
6680 				net->window_probe = 0;
6681 				(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
6682 				    sctp_timeout_handler, &net->rxt_timer);
6683 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
6684 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
6685 				    stcb, net,
6686 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
6687 			}
6688 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
6689 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
6690 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
6691 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
6692 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
6693 				}
6694 			}
6695 		}
6696 	}
6697 	if ((j == 0) &&
6698 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
6699 	    (asoc->sent_queue_retran_cnt == 0) &&
6700 	    (win_probe_recovered == 0) &&
6701 	    (done_once == 0)) {
6702 		/*
6703 		 * Huh, this should not happen unless all packets are
6704 		 * PR-SCTP and marked to skip, of course.
6705 		 */
6706 		if (sctp_fs_audit(asoc)) {
6707 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6708 				net->flight_size = 0;
6709 			}
6710 			asoc->total_flight = 0;
6711 			asoc->total_flight_count = 0;
6712 			asoc->sent_queue_retran_cnt = 0;
6713 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
6714 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
6715 					sctp_flight_size_increase(tp1);
6716 					sctp_total_flight_increase(stcb, tp1);
6717 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
6718 					asoc->sent_queue_retran_cnt++;
6719 				}
6720 			}
6721 		}
6722 		done_once = 1;
6723 		goto again;
6724 	}
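	/*
	 * The rescue block above rebuilds the flight accounting when the
	 * counters have drifted: every chunk below RESEND is charged back
	 * into flight, every chunk marked RESEND is recounted as a
	 * pending retransmission.  A sketch of that recount, using
	 * hypothetical sent/book_size fields over a plain array
	 * (illustration only):
	 *
	 *	uint32_t
	 *	fs_recount_sketch(struct chunk_sketch *c, int n,
	 *	    uint32_t *retran_cnt)
	 *	{
	 *		uint32_t flight = 0;
	 *		int i;
	 *
	 *		*retran_cnt = 0;
	 *		for (i = 0; i < n; i++) {
	 *			if (c[i].sent < SCTP_DATAGRAM_RESEND)
	 *				flight += c[i].book_size;
	 *			else if (c[i].sent == SCTP_DATAGRAM_RESEND)
	 *				(*retran_cnt)++;
	 *		}
	 *		return (flight);
	 *	}
	 */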
6725 	/**********************************/
6726 	/* Now what about shutdown issues */
6727 	/**********************************/
6728 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
6729 		/* nothing left on sendqueue.. consider done */
6730 		/* clean up */
6731 		if ((asoc->stream_queue_cnt == 1) &&
6732 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
6733 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
6734 		    (asoc->locked_on_sending)
6735 		    ) {
6736 			struct sctp_stream_queue_pending *sp;
6737 
6738 			/*
6739 			 * I may be in a state where everything got across
6740 			 * but we cannot write more due to a shutdown; we
6741 			 * abort since the user did not indicate EOR in this
6742 			 * case. The sp will be cleaned up when the asoc is freed.
6743 			 */
6744 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
6745 			    sctp_streamhead);
6746 			if ((sp) && (sp->length == 0)) {
6747 				/* Let cleanup code purge it */
6748 				if (sp->msg_is_complete) {
6749 					asoc->stream_queue_cnt--;
6750 				} else {
6751 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
6752 					asoc->locked_on_sending = NULL;
6753 					asoc->stream_queue_cnt--;
6754 				}
6755 			}
6756 		}
6757 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
6758 		    (asoc->stream_queue_cnt == 0)) {
6759 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
6760 				/* Need to abort here */
6761 				struct mbuf *oper;
6762 
6763 		abort_out_now:
6764 				*abort_now = 1;
6765 				/* XXX */
6766 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
6767 				    0, M_DONTWAIT, 1, MT_DATA);
6768 				if (oper) {
6769 					struct sctp_paramhdr *ph;
6770 					uint32_t *ippp;
6771 
6772 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
6773 					    sizeof(uint32_t);
6774 					ph = mtod(oper, struct sctp_paramhdr *);
6775 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
6776 					ph->param_length = htons(SCTP_BUF_LEN(oper));
6777 					ippp = (uint32_t *) (ph + 1);
6778 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
6779 				}
6780 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
6781 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
6782 			} else {
6783 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
6784 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
6785 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6786 				}
6787 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
6788 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6789 				sctp_stop_timers_for_shutdown(stcb);
6790 				sctp_send_shutdown(stcb,
6791 				    stcb->asoc.primary_destination);
6792 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
6793 				    stcb->sctp_ep, stcb, asoc->primary_destination);
6794 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
6795 				    stcb->sctp_ep, stcb, asoc->primary_destination);
6796 			}
6797 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
6798 		    (asoc->stream_queue_cnt == 0)) {
6799 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
6800 				goto abort_out_now;
6801 			}
6802 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6803 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
6804 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6805 			sctp_send_shutdown_ack(stcb,
6806 			    stcb->asoc.primary_destination);
6807 
6808 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
6809 			    stcb->sctp_ep, stcb, asoc->primary_destination);
6810 		}
6811 	}
6812 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
6813 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
6814 		    rwnd,
6815 		    stcb->asoc.peers_rwnd,
6816 		    stcb->asoc.total_flight,
6817 		    stcb->asoc.total_output_queue_size);
6818 	}
6819 }
6820 
6821 /* EY! nr_sack version of sctp_handle_segments; nr-gapped TSNs get removed from the RtxQ in this function */
6822 static void
6823 sctp_handle_nr_sack_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
6824     struct sctp_nr_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
6825     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
6826     uint32_t num_seg, uint32_t num_nr_seg, int *ecn_seg_sums)
6827 {
6828 	/************************************************/
6829 	/* process fragments and update sendqueue        */
6830 	/************************************************/
6831 	struct sctp_nr_sack *nr_sack;
6832 	struct sctp_gap_ack_block *frag, block;
6833 	struct sctp_nr_gap_ack_block *nr_frag, nr_block;
6834 	struct sctp_tmit_chunk *tp1;
6835 	uint32_t i, j;
6836 	int wake_him = 0;
6837 	uint32_t theTSN;
6838 	int num_frs = 0;
6839 
6840 	uint16_t frag_strt, frag_end, primary_flag_set;
6841 	uint16_t nr_frag_strt, nr_frag_end;
6842 
6843 	uint32_t last_frag_high;
6844 	uint32_t last_nr_frag_high;
6845 
6846 	/*
6847 	 * @@@ JRI : TODO: This flag is not used anywhere .. remove?
6848 	 */
6849 	if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
6850 		primary_flag_set = 1;
6851 	} else {
6852 		primary_flag_set = 0;
6853 	}
6854 	nr_sack = &ch->nr_sack;
6855 
6856 	/*
6857 	 * EY! - I will process nr_gaps similarly, by coming back to this
6858 	 * position if the All bit is set
6859 	 */
6860 	frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
6861 	    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
6862 	*offset += sizeof(block);
6863 	if (frag == NULL) {
6864 		return;
6865 	}
6866 	tp1 = NULL;
6867 	last_frag_high = 0;
6868 	for (i = 0; i < num_seg; i++) {
6869 		frag_strt = ntohs(frag->start);
6870 		frag_end = ntohs(frag->end);
6871 		/* some sanity checks on the fragment offsets */
6872 		if (frag_strt > frag_end) {
6873 			/* this one is malformed, skip */
6874 			frag++;
6875 			continue;
6876 		}
6877 		if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
6878 		    MAX_TSN))
6879 			*biggest_tsn_acked = frag_end + last_tsn;
6880 
6881 		/* mark acked dgs and find out the highest TSN being acked */
6882 		if (tp1 == NULL) {
6883 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
6884 
6885 			/* save the locations of the last frags */
6886 			last_frag_high = frag_end + last_tsn;
6887 		} else {
6888 			/*
6889 			 * now let's see if we need to reset the queue due to
6890 			 * an out-of-order SACK fragment
6891 			 */
6892 			if (compare_with_wrap(frag_strt + last_tsn,
6893 			    last_frag_high, MAX_TSN)) {
6894 				/*
6895 				 * if the new frag starts after the last TSN
6896 				 * frag covered, we are ok and this one is
6897 				 * beyond the last one
6898 				 */
6899 				;
6900 			} else {
6901 				/*
6902 				 * ok, they have reset us, so we need to
6903 				 * reset the queue. This will cause extra
6904 				 * hunting but hey, they chose the
6905 				 * performance hit when they failed to order
6906 				 * their gaps..
6907 				 */
6908 				tp1 = TAILQ_FIRST(&asoc->sent_queue);
6909 			}
6910 			last_frag_high = frag_end + last_tsn;
6911 		}
6912 		for (j = frag_strt; j <= frag_end; j++) {
6913 			theTSN = j + last_tsn;
6914 			while (tp1) {
6915 				if (tp1->rec.data.doing_fast_retransmit)
6916 					num_frs++;
6917 
6918 				/*
6919 				 * CMT: CUCv2 algorithm. For each TSN being
6920 				 * processed from the sent queue, track the
6921 				 * next expected pseudo-cumack, or
6922 				 * rtx_pseudo_cumack, if required. Separate
6923 				 * cumack trackers for first transmissions,
6924 				 * and retransmissions.
6925 				 */
6926 				if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
6927 				    (tp1->snd_count == 1)) {
6928 					tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
6929 					tp1->whoTo->find_pseudo_cumack = 0;
6930 				}
6931 				if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
6932 				    (tp1->snd_count > 1)) {
6933 					tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
6934 					tp1->whoTo->find_rtx_pseudo_cumack = 0;
6935 				}
6936 				if (tp1->rec.data.TSN_seq == theTSN) {
6937 					if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
6938 						/*
6939 						 * must be held until
6940 						 * cum-ack passes
6941 						 */
6942 						/*
6943 						 * ECN Nonce: Add the nonce
6944 						 * value to the sender's
6945 						 * nonce sum
6946 						 */
6947 						if (tp1->sent < SCTP_DATAGRAM_RESEND) {
6948 							/*-
6949 						         * If it is less than RESEND, it is
6950 						         * now no longer in flight.
6951 						         * Higher values may already be set
6952 						         * via previous Gap Ack Blocks...
6953 						         * i.e. ACKED or RESEND.
6954 						         */
6955 							if (compare_with_wrap(tp1->rec.data.TSN_seq,
6956 							    *biggest_newly_acked_tsn, MAX_TSN)) {
6957 								*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
6958 							}
6959 							/*
6960 							 * CMT: SFR algo (and
6961 							 * HTNA) - set
6962 							 * saw_newack to 1 for
6963 							 * dest being newly
6964 							 * acked. Update
6965 							 * this_sack_highest_newack
6966 							 * if appropriate.
6969 							 */
6970 							if (tp1->rec.data.chunk_was_revoked == 0)
6971 								tp1->whoTo->saw_newack = 1;
6972 
6973 							if (compare_with_wrap(tp1->rec.data.TSN_seq,
6974 							    tp1->whoTo->this_sack_highest_newack,
6975 							    MAX_TSN)) {
6976 								tp1->whoTo->this_sack_highest_newack =
6977 								    tp1->rec.data.TSN_seq;
6978 							}
6979 							/*
6980 							 * CMT DAC algo: also
6981 							 * update
6982 							 * this_sack_lowest_newack
6984 							 */
6985 							if (*this_sack_lowest_newack == 0) {
6986 								if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
6987 									sctp_log_sack(*this_sack_lowest_newack,
6988 									    last_tsn,
6989 									    tp1->rec.data.TSN_seq,
6990 									    0,
6991 									    0,
6992 									    SCTP_LOG_TSN_ACKED);
6993 								}
6994 								*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
6995 							}
6996 							/*
6997 							 * CMT: CUCv2 algorithm.
6998 							 * If the (rtx-)pseudo-cumack
6999 							 * for the corresp dest is
7000 							 * being acked, then we have
7001 							 * a new (rtx-)pseudo-cumack.
7002 							 * Set new_(rtx_)pseudo_cumack
7003 							 * to TRUE so that the cwnd
7004 							 * for this dest can be
7005 							 * updated. Also trigger a
7006 							 * search for the next
7007 							 * expected (rtx-)pseudo-cumack.
7008 							 * Separate pseudo_cumack
7009 							 * trackers for first
7010 							 * transmissions and retransmissions.
7021 							 */
7022 							if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
7023 								if (tp1->rec.data.chunk_was_revoked == 0) {
7024 									tp1->whoTo->new_pseudo_cumack = 1;
7025 								}
7026 								tp1->whoTo->find_pseudo_cumack = 1;
7027 							}
7028 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7029 								sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
7030 							}
7031 							if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
7032 								if (tp1->rec.data.chunk_was_revoked == 0) {
7033 									tp1->whoTo->new_pseudo_cumack = 1;
7034 								}
7035 								tp1->whoTo->find_rtx_pseudo_cumack = 1;
7036 							}
7037 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7038 								sctp_log_sack(*biggest_newly_acked_tsn,
7039 								    last_tsn,
7040 								    tp1->rec.data.TSN_seq,
7041 								    frag_strt,
7042 								    frag_end,
7043 								    SCTP_LOG_TSN_ACKED);
7044 							}
7045 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7046 								sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
7047 								    tp1->whoTo->flight_size,
7048 								    tp1->book_size,
7049 								    (uintptr_t) tp1->whoTo,
7050 								    tp1->rec.data.TSN_seq);
7051 							}
7052 							sctp_flight_size_decrease(tp1);
7053 							sctp_total_flight_decrease(stcb, tp1);
7054 
7055 							tp1->whoTo->net_ack += tp1->send_size;
7056 							if (tp1->snd_count < 2) {
7057 								/*
7058 								 * True
7059 								 * non-retransmitted
7060 								 * chunk
7062 								 */
7063 								tp1->whoTo->net_ack2 += tp1->send_size;
7064 
7065 								/*
7066 								 * update
7067 								 * RTO too ?
7068 								 */
7069 								if (tp1->do_rtt) {
7070 									tp1->whoTo->RTO =
7071 									    sctp_calculate_rto(stcb,
7072 									    asoc,
7073 									    tp1->whoTo,
7074 									    &tp1->sent_rcv_time,
7075 									    sctp_align_safe_nocopy);
7076 									tp1->do_rtt = 0;
7077 								}
7078 							}
7079 						}
7080 						if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
7081 							(*ecn_seg_sums) += tp1->rec.data.ect_nonce;
7082 							(*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
7083 							if (compare_with_wrap(tp1->rec.data.TSN_seq,
7084 							    asoc->this_sack_highest_gap,
7085 							    MAX_TSN)) {
7086 								asoc->this_sack_highest_gap =
7087 								    tp1->rec.data.TSN_seq;
7088 							}
7089 							if (tp1->sent == SCTP_DATAGRAM_RESEND) {
7090 								sctp_ucount_decr(asoc->sent_queue_retran_cnt);
7091 #ifdef SCTP_AUDITING_ENABLED
7092 								sctp_audit_log(0xB2,
7093 								    (asoc->sent_queue_retran_cnt & 0x000000ff));
7094 #endif
7095 							}
7096 						}
7097 						/*
7098 						 * All chunks NOT UNSENT
7099 						 * fall through here and are
7100 						 * marked
7101 						 */
7102 						if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
7103 							tp1->sent = SCTP_DATAGRAM_NR_MARKED;
7104 						if (tp1->rec.data.chunk_was_revoked) {
7105 							/* deflate the cwnd */
7106 							tp1->whoTo->cwnd -= tp1->book_size;
7107 							tp1->rec.data.chunk_was_revoked = 0;
7108 						}
7109 					}
7110 					break;
7111 				}	/* if (tp1->TSN_seq == theTSN) */
7112 				if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
7113 				    MAX_TSN))
7114 					break;
7115 
7116 				tp1 = TAILQ_NEXT(tp1, sctp_next);
7117 			}	/* end while (tp1) */
7118 		}		/* end for (j = fragStart */
7119 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
7120 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
7121 		*offset += sizeof(block);
7122 		if (frag == NULL) {
7123 			break;
7124 		}
7125 	}
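	/*
	 * Each gap-ack block above is a (start, end) pair of 16-bit
	 * offsets relative to the cumulative ack, so it covers TSNs
	 * cum_ack + start through cum_ack + end.  A compact sketch of
	 * expanding blocks into absolute TSNs (illustration only; the
	 * real loop also walks the sent queue and updates CMT state):
	 *
	 *	for (i = 0; i < ngaps; i++) {
	 *		if (start[i] > end[i])
	 *			continue;		(malformed, skip)
	 *		for (tsn = cum_ack + start[i];
	 *		    tsn != cum_ack + end[i] + 1; tsn++)
	 *			mark_received(tsn);
	 *	}
	 *
	 * where start[], end[] are host-order copies and mark_received()
	 * is a stand-in for the marking done on the sent queue.
	 */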
7126 
7127 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
7128 		if (num_frs)
7129 			sctp_log_fr(*biggest_tsn_acked,
7130 			    *biggest_newly_acked_tsn,
7131 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
7132 	}
7133 	nr_frag = (struct sctp_nr_gap_ack_block *)sctp_m_getptr(m, *offset,
7134 	    sizeof(struct sctp_nr_gap_ack_block), (uint8_t *) & nr_block);
7135 	*offset += sizeof(nr_block);
7136 
7139 	if (nr_frag == NULL) {
7140 		return;
7141 	}
7142 	tp1 = NULL;
7143 	last_nr_frag_high = 0;
7144 
7145 	for (i = 0; i < num_nr_seg; i++) {
7146 
7147 		nr_frag_strt = ntohs(nr_frag->start);
7148 		nr_frag_end = ntohs(nr_frag->end);
7149 
7150 		/* some sanity checks on the nr fragment offsets */
7151 		if (nr_frag_strt > nr_frag_end) {
7152 			/* this one is malformed, skip */
7153 			nr_frag++;
7154 			continue;
7155 		}
7156 		/* mark acked dgs and find out the highest TSN being acked */
7157 		if (tp1 == NULL) {
7158 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
7159 
7160 			/* save the locations of the last frags */
7161 			last_nr_frag_high = nr_frag_end + last_tsn;
7162 		} else {
7163 			/*
7164 			 * now let's see if we need to reset the queue due to
7165 			 * an out-of-order SACK fragment
7166 			 */
7167 			if (compare_with_wrap(nr_frag_strt + last_tsn,
7168 			    last_nr_frag_high, MAX_TSN)) {
7169 				/*
7170 				 * if the new frag starts after the last TSN
7171 				 * frag covered, we are ok and this one is
7172 				 * beyond the last one
7173 				 */
7174 				;
7175 			} else {
7176 				/*
7177 				 * ok, they have reset us, so we need to
7178 				 * reset the queue. This will cause extra
7179 				 * hunting but hey, they chose the
7180 				 * performance hit when they failed to order
7181 				 * their gaps..
7182 				 */
7183 				tp1 = TAILQ_FIRST(&asoc->sent_queue);
7184 			}
7185 			last_nr_frag_high = nr_frag_end + last_tsn;
7186 		}
7187 
7188 		for (j = nr_frag_strt + last_tsn; (compare_with_wrap((nr_frag_end + last_tsn), j, MAX_TSN)); j++) {
7189 			while (tp1) {
7190 				if (tp1->rec.data.TSN_seq == j) {
7191 					if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
7192 						if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
7193 							tp1->sent = SCTP_DATAGRAM_NR_MARKED;
7194 						/* TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); */
7198 						if (tp1->data) {
7199 							/*
7200 							 * sa_ignore
7201 							 * NO_NULL_CHK
7202 							 */
7203 							sctp_free_bufspace(stcb, asoc, tp1, 1);
7204 							sctp_m_freem(tp1->data);
7205 						}
7206 						tp1->data = NULL;
7207 						/* asoc->sent_queue_cnt--; */
7208 						/* sctp_free_a_chunk(stcb, tp1); */
7212 						wake_him++;
7213 					}
7214 					break;
7215 				}	/* if (tp1->TSN_seq == j) */
7216 				if (compare_with_wrap(tp1->rec.data.TSN_seq, j,
7217 				    MAX_TSN))
7218 					break;
7219 				tp1 = TAILQ_NEXT(tp1, sctp_next);
7220 			}	/* end while (tp1) */
7221 
7222 		}		/* end for (j = nrFragStart */
7223 
7224 		nr_frag = (struct sctp_nr_gap_ack_block *)sctp_m_getptr(m, *offset,
7225 		    sizeof(struct sctp_nr_gap_ack_block), (uint8_t *) & nr_block);
7226 		*offset += sizeof(nr_block);
7227 		if (nr_frag == NULL) {
7228 			break;
7229 		}
7230 	}
7231 
7232 	/*
7233 	 * EY- wake up the socket if things have been removed from the sent
7234 	 * queue
7235 	 */
7236 	if ((wake_him) && (stcb->sctp_socket)) {
7237 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7238 		struct socket *so;
7239 
7240 #endif
7241 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
7242 		/*
7243 		 * if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
7244 		 *	sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
7245 		 * }
7246 		 */
7247 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7248 		so = SCTP_INP_SO(stcb->sctp_ep);
7249 		atomic_add_int(&stcb->asoc.refcnt, 1);
7250 		SCTP_TCB_UNLOCK(stcb);
7251 		SCTP_SOCKET_LOCK(so, 1);
7252 		SCTP_TCB_LOCK(stcb);
7253 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
7254 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
7255 			/* assoc was freed while we were unlocked */
7256 			SCTP_SOCKET_UNLOCK(so, 1);
7257 			return;
7258 		}
7259 #endif
7260 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
7261 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7262 		SCTP_SOCKET_UNLOCK(so, 1);
7263 #endif
7264 	}			/* else { if
7265 				 * (SCTP_BASE_SYSCTL(sctp_logging_level) &
7266 				 * SCTP_WAKE_LOGGING_ENABLE) {
7267 				 * sctp_wakeup_log(stcb, cum_ack, wake_him,
7268 				 * SCTP_NOWAKE_FROM_SACK); } } */
7269 }
7270 
7271 /* EY- nr_sack */
7272 /* Identifies the non-renegable TSNs that are revoked */
7273 static void
7274 sctp_check_for_nr_revoked(struct sctp_tcb *stcb,
7275     struct sctp_association *asoc, uint32_t cumack,
7276     u_long biggest_tsn_acked)
7277 {
7278 	struct sctp_tmit_chunk *tp1;
7279 
7280 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
7281 	while (tp1) {
7282 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
7283 		    MAX_TSN)) {
7284 			/*
7285 			 * ok, this guy is either NR_ACKED or NR_MARKED. If it
7286 			 * is NR_ACKED it was previously acked but not this
7287 			 * time, i.e. revoked. If it is NR_MARKED it was ACK'ed
7288 			 * again.
7289 			 */
7290 			if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
7291 			    MAX_TSN))
7292 				break;
7293 
7295 			if (tp1->sent == SCTP_DATAGRAM_NR_ACKED) {
7296 				/*
7297 				 * EY! a non-renegable TSN is revoked; we need
7298 				 * to abort the association.
7299 				 * EY TODO: put in the code to abort the assoc.
7300 				 */
7304 				return;
7305 			} else if (tp1->sent == SCTP_DATAGRAM_NR_MARKED) {
7306 				/* it has been re-acked in this SACK */
7307 				tp1->sent = SCTP_DATAGRAM_NR_ACKED;
7308 			}
7309 		}
7310 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
7311 			break;
7312 		tp1 = TAILQ_NEXT(tp1, sctp_next);
7313 	}
7314 }
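
/*
 * The check above is a small state machine over tp1->sent: a chunk the
 * peer nr-gap-acked earlier sits at NR_ACKED; if this SACK re-reports
 * it, it was found as NR_MARKED and is promoted back to NR_ACKED, while
 * a chunk still at NR_ACKED was not re-reported, i.e. the peer revoked
 * a non-renegable TSN.  A sketch of that transition (illustration only,
 * not used by the code in this file; non-zero means the revoked/abort
 * case):
 */
static __inline int
sctp_nr_revoke_sketch(int *sent)
{
	if (*sent == SCTP_DATAGRAM_NR_ACKED)
		return (1);	/* previously nr-acked, now missing */
	if (*sent == SCTP_DATAGRAM_NR_MARKED)
		*sent = SCTP_DATAGRAM_NR_ACKED;	/* re-acked this SACK */
	return (0);
}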
7315 
7316 /* EY! nr_sack version of sctp_handle_sack; nr_gap_ack processing should be added to this function */
7317 void
7318 sctp_handle_nr_sack(struct mbuf *m, int offset,
7319     struct sctp_nr_sack_chunk *ch, struct sctp_tcb *stcb,
7320     struct sctp_nets *net_from, int *abort_now, int nr_sack_len, uint32_t rwnd)
7321 {
7322 	struct sctp_association *asoc;
7323 
7324 	/* EY sack */
7325 	struct sctp_nr_sack *nr_sack;
7326 	struct sctp_tmit_chunk *tp1, *tp2;
7327 	uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
7328 	         this_sack_lowest_newack;
7329 	uint32_t sav_cum_ack;
7330 
7331 	/* EY num_seg */
7332 	uint16_t num_seg, num_nr_seg, num_dup;
7333 	uint16_t wake_him = 0;
7334 	unsigned int nr_sack_length;
7335 	uint32_t send_s = 0;
7336 	long j;
7337 	int accum_moved = 0;
7338 	int will_exit_fast_recovery = 0;
7339 	uint32_t a_rwnd, old_rwnd;
7340 	int win_probe_recovery = 0;
7341 	int win_probe_recovered = 0;
7342 	struct sctp_nets *net = NULL;
7343 	int nonce_sum_flag, ecn_seg_sums = 0;
7344 	int done_once;
7345 	uint8_t reneged_all = 0;
7346 	uint8_t cmt_dac_flag;
7347 
7348 	/*
7349 	 * we take any chance we can to service our queues since we cannot
7350 	 * get awoken when the socket is read from :<
7351 	 */
7352 	/*
7353 	 * Now perform the actual SACK handling: 1) Verify that it is not an
7354 	 * old sack, if so discard. 2) If there is nothing left in the send
7355 	 * queue (cum-ack is equal to last acked) then you have a duplicate
7356 	 * too, update any rwnd change and verify no timers are running.
7357 	 * then return. 3) Process any new consecutive data, i.e. cum-ack
7358 	 * moved; process these first and note that it moved. 4) Process any
7359 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
7360 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
7361 	 * sync up flightsizes and things, stop all timers and also check
7362 	 * for shutdown_pending state. If so then go ahead and send off the
7363 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
7364 	 * start that timer, then return. 9) Strike any non-acked things and do FR
7365 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
7366 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
7367 	 * if in shutdown_recv state.
7368 	 */
7369 	SCTP_TCB_LOCK_ASSERT(stcb);
7370 	nr_sack = &ch->nr_sack;
7371 	/* CMT DAC algo */
7372 	this_sack_lowest_newack = 0;
7373 	j = 0;
7374 	nr_sack_length = (unsigned int)nr_sack_len;
7375 	/* ECN Nonce */
7376 	SCTP_STAT_INCR(sctps_slowpath_sack);
7377 	nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
7378 	cum_ack = last_tsn = ntohl(nr_sack->cum_tsn_ack);
7379 #ifdef SCTP_ASOCLOG_OF_TSNS
7380 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
7381 	stcb->asoc.cumack_log_at++;
7382 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
7383 		stcb->asoc.cumack_log_at = 0;
7384 	}
7385 #endif
7386 	num_seg = ntohs(nr_sack->num_gap_ack_blks);
7387 	num_nr_seg = ntohs(nr_sack->num_nr_gap_ack_blks);
7388 	a_rwnd = rwnd;
7389 
7390 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
7391 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
7392 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
7393 	}
7394 	/* CMT DAC algo */
7395 	cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
7396 	num_dup = ntohs(nr_sack->num_dup_tsns);
7397 
7398 	old_rwnd = stcb->asoc.peers_rwnd;
7399 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
7400 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
7401 		    stcb->asoc.overall_error_count,
7402 		    0,
7403 		    SCTP_FROM_SCTP_INDATA,
7404 		    __LINE__);
7405 	}
7406 	stcb->asoc.overall_error_count = 0;
7407 	asoc = &stcb->asoc;
7408 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7409 		sctp_log_sack(asoc->last_acked_seq,
7410 		    cum_ack,
7411 		    0,
7412 		    num_seg,
7413 		    num_dup,
7414 		    SCTP_LOG_NEW_SACK);
7415 	}
7416 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
7417 		int off_to_dup, iii;
7418 		uint32_t *dupdata, dblock;
7419 
7420 		off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) +
7421 		    (num_nr_seg * sizeof(struct sctp_nr_gap_ack_block)) + sizeof(struct sctp_nr_sack_chunk);
7422 		if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= nr_sack_length) {
7423 			dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
7424 			    sizeof(uint32_t), (uint8_t *) & dblock);
7425 			off_to_dup += sizeof(uint32_t);
7426 			if (dupdata) {
7427 				for (iii = 0; iii < num_dup; iii++) {
7428 					sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
7429 					dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
7430 					    sizeof(uint32_t), (uint8_t *) & dblock);
7431 					if (dupdata == NULL)
7432 						break;
7433 					off_to_dup += sizeof(uint32_t);
7434 				}
7435 			}
7436 		} else {
7437 			SCTP_PRINTF("Size invalid offset to dups:%d number dups:%d nr_sack_len:%d num gaps:%d num nr_gaps:%d\n",
7438 			    off_to_dup, num_dup, nr_sack_length, num_seg, num_nr_seg);
7439 		}
7440 	}
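	/*
	 * The duplicate-TSN walk above relies on the NR-SACK wire layout:
	 * the chunk header, then num_seg gap-ack blocks, then num_nr_seg
	 * nr-gap-ack blocks, then num_dup 32-bit duplicate TSNs.  A
	 * sketch of the offset/bounds arithmetic (illustration only):
	 *
	 *	off = sizeof(struct sctp_nr_sack_chunk) +
	 *	    num_seg * sizeof(struct sctp_gap_ack_block) +
	 *	    num_nr_seg * sizeof(struct sctp_nr_gap_ack_block);
	 *	if (off + num_dup * sizeof(uint32_t) > chunk_len)
	 *		(counts exceed the chunk length -- treat as invalid)
	 *
	 * which is exactly the off_to_dup check performed above.
	 */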
7441 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
7442 		/* reality check */
7443 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
7444 			tp1 = TAILQ_LAST(&asoc->sent_queue,
7445 			    sctpchunk_listhead);
7446 			send_s = tp1->rec.data.TSN_seq + 1;
7447 		} else {
7448 			send_s = asoc->sending_seq;
7449 		}
7450 		if (cum_ack == send_s ||
7451 		    compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
7452 #ifndef INVARIANTS
7453 			struct mbuf *oper;
7454 
7455 #endif
7456 #ifdef INVARIANTS
7457 	hopeless_peer:
7458 			panic("Impossible sack 1");
7459 #else
7460 
7462 			/*
7463 			 * no way, we have not even sent this TSN out yet.
7464 			 * Peer is hopelessly messed up with us.
7465 			 */
7466 	hopeless_peer:
7467 			*abort_now = 1;
7468 			/* XXX */
7469 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
7470 			    0, M_DONTWAIT, 1, MT_DATA);
7471 			if (oper) {
7472 				struct sctp_paramhdr *ph;
7473 				uint32_t *ippp;
7474 
7475 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
7476 				    sizeof(uint32_t);
7477 				ph = mtod(oper, struct sctp_paramhdr *);
7478 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
7479 				ph->param_length = htons(SCTP_BUF_LEN(oper));
7480 				ippp = (uint32_t *) (ph + 1);
7481 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
7482 			}
7483 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
7484 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
7485 			return;
7486 #endif
7487 		}
7488 	}
7489 	/**********************/
7490 	/* 1) check the range */
7491 	/**********************/
7492 	if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
7493 		/* acking something behind */
7494 		return;
7495 	}
7496 	sav_cum_ack = asoc->last_acked_seq;
7497 
7498 	/* update the Rwnd of the peer */
7499 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
7500 	    TAILQ_EMPTY(&asoc->send_queue) &&
7501 	    (asoc->stream_queue_cnt == 0)
7502 	    ) {
7503 		/* nothing left on send/sent and strmq */
7504 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7505 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
7506 			    asoc->peers_rwnd, 0, 0, a_rwnd);
7507 		}
7508 		asoc->peers_rwnd = a_rwnd;
7509 		if (asoc->sent_queue_retran_cnt) {
7510 			asoc->sent_queue_retran_cnt = 0;
7511 		}
7512 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7513 			/* SWS sender side engages */
7514 			asoc->peers_rwnd = 0;
7515 		}
7516 		/* stop any timers */
7517 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7518 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7519 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
7520 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
7521 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
7522 					SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
7523 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
7524 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
7525 				}
7526 			}
7527 			net->partial_bytes_acked = 0;
7528 			net->flight_size = 0;
7529 		}
7530 		asoc->total_flight = 0;
7531 		asoc->total_flight_count = 0;
7532 		return;
7533 	}
7534 	/*
7535 	 * We init net_ack and net_ack2 to 0. These are used to track two
7536 	 * things. The total byte count acked is tracked in net_ack AND
7537 	 * net_ack2 is used to track the total bytes acked that are
7538 	 * unambiguous and were never retransmitted. We track these on a
7539 	 * per destination address basis.
7540 	 */
7541 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7542 		net->prev_cwnd = net->cwnd;
7543 		net->net_ack = 0;
7544 		net->net_ack2 = 0;
7545 
7546 		/*
7547 		 * CMT: Reset CUC and Fast recovery algo variables before
7548 		 * SACK processing
7549 		 */
7550 		net->new_pseudo_cumack = 0;
7551 		net->will_exit_fast_recovery = 0;
7552 	}
7553 	/* process the new consecutive TSN first */
7554 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
7555 	while (tp1) {
7556 		if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
7557 		    MAX_TSN) ||
7558 		    last_tsn == tp1->rec.data.TSN_seq) {
7559 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
7560 				/*
7561 				 * ECN Nonce: Add the nonce to the sender's
7562 				 * nonce sum
7563 				 */
7564 				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
7565 				accum_moved = 1;
7566 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
7567 					/*
7568 					 * If it is less than ACKED, it is
7569 					 * now no longer in flight. Higher
7570 					 * values may occur during marking
7571 					 */
7572 					if ((tp1->whoTo->dest_state &
7573 					    SCTP_ADDR_UNCONFIRMED) &&
7574 					    (tp1->snd_count < 2)) {
7575 						/*
7576 						 * If there was no retran
7577 						 * and the address is
7578 						 * un-confirmed and we sent
7579 						 * there and are now
7580 						 * sacked.. it's confirmed,
7581 						 * mark it so.
7582 						 */
7583 						tp1->whoTo->dest_state &=
7584 						    ~SCTP_ADDR_UNCONFIRMED;
7585 					}
7586 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
7587 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7588 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
7589 							    tp1->whoTo->flight_size,
7590 							    tp1->book_size,
7591 							    (uintptr_t) tp1->whoTo,
7592 							    tp1->rec.data.TSN_seq);
7593 						}
7594 						sctp_flight_size_decrease(tp1);
7595 						sctp_total_flight_decrease(stcb, tp1);
7596 					}
7597 					tp1->whoTo->net_ack += tp1->send_size;
7598 
7599 					/* CMT SFR and DAC algos */
7600 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
7601 					tp1->whoTo->saw_newack = 1;
7602 
7603 					if (tp1->snd_count < 2) {
7604 						/*
7605 						 * True non-retransmitted
7606 						 * chunk
7607 						 */
7608 						tp1->whoTo->net_ack2 +=
7609 						    tp1->send_size;
7610 
7611 						/* update RTO too? */
7612 						if (tp1->do_rtt) {
7613 							tp1->whoTo->RTO =
7614 							    sctp_calculate_rto(stcb,
7615 							    asoc, tp1->whoTo,
7616 							    &tp1->sent_rcv_time,
7617 							    sctp_align_safe_nocopy);
7618 							tp1->do_rtt = 0;
7619 						}
7620 					}
7621 					/*
7622 					 * CMT: CUCv2 algorithm. From the
7623 					 * cumack'd TSNs, for each TSN being
7624 					 * acked for the first time, set the
7625 					 * following variables for the
7626 					 * corresp destination.
7627 					 * new_pseudo_cumack will trigger a
7628 					 * cwnd update.
7629 					 * find_(rtx_)pseudo_cumack will
7630 					 * trigger search for the next
7631 					 * expected (rtx-)pseudo-cumack.
7632 					 */
7633 					tp1->whoTo->new_pseudo_cumack = 1;
7634 					tp1->whoTo->find_pseudo_cumack = 1;
7635 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
7636 
7638 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7639 						sctp_log_sack(asoc->last_acked_seq,
7640 						    cum_ack,
7641 						    tp1->rec.data.TSN_seq,
7642 						    0,
7643 						    0,
7644 						    SCTP_LOG_TSN_ACKED);
7645 					}
7646 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7647 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
7648 					}
7649 				}
7650 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
7651 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
7652 #ifdef SCTP_AUDITING_ENABLED
7653 					sctp_audit_log(0xB3,
7654 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
7655 #endif
7656 				}
7657 				if (tp1->rec.data.chunk_was_revoked) {
7658 					/* deflate the cwnd */
7659 					tp1->whoTo->cwnd -= tp1->book_size;
7660 					tp1->rec.data.chunk_was_revoked = 0;
7661 				}
7662 				tp1->sent = SCTP_DATAGRAM_ACKED;
7663 			}
7664 		} else {
7665 			break;
7666 		}
7667 		tp1 = TAILQ_NEXT(tp1, sctp_next);
7668 	}
7669 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
7670 	/* always set this up to cum-ack */
7671 	asoc->this_sack_highest_gap = last_tsn;
7672 
7673 	/* Move offset up to point to gaps/dups */
7674 	offset += sizeof(struct sctp_nr_sack_chunk);
7675 	if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_nr_sack_chunk)) > nr_sack_length) {
7677 		/* skip corrupt segments */
7678 		goto skip_segments;
7679 	}
7680 	if (num_seg > 0) {
7682 		/*
7683 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
7684 		 * to be greater than the cumack. Also reset saw_newack to 0
7685 		 * for all dests.
7686 		 */
7687 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7688 			net->saw_newack = 0;
7689 			net->this_sack_highest_newack = last_tsn;
7690 		}
7691 
7692 		/*
7693 		 * this_sack_highest_gap will increase while handling NEW
7694 		 * segments. this_sack_highest_newack will increase while
7695 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
7696 		 * used for CMT DAC algo. saw_newack will also change.
7697 		 */
7698 
7699 		sctp_handle_nr_sack_segments(m, &offset, stcb, asoc, ch, last_tsn,
7700 		    &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
7701 		    num_seg, num_nr_seg, &ecn_seg_sums);
7702 
7704 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
7705 			/*
7706 			 * validate the biggest_tsn_acked in the gap acks if
7707 			 * strict adherence is wanted.
7708 			 */
7709 			if ((biggest_tsn_acked == send_s) ||
7710 			    (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
7711 				/*
7712 				 * peer is either confused or we are under
7713 				 * attack. We must abort.
7714 				 */
7715 				goto hopeless_peer;
7716 			}
7717 		}
7718 	}
7719 skip_segments:
7720 	/*******************************************/
7721 	/* cancel ALL T3-send timer if accum moved */
7722 	/*******************************************/
7723 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
7724 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7725 			if (net->new_pseudo_cumack)
7726 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7727 				    stcb, net,
7728 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
7729 
7730 		}
7731 	} else {
7732 		if (accum_moved) {
7733 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7734 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7735 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
7736 			}
7737 		}
7738 	}
7739 	/********************************************/
7740 	/* drop the acked chunks from the sendqueue */
7741 	/********************************************/
7742 	asoc->last_acked_seq = cum_ack;
7743 
7744 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
7745 	if (tp1 == NULL)
7746 		goto done_with_it;
7747 	do {
7748 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
7749 		    MAX_TSN)) {
7750 			break;
7751 		}
7752 		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
7753 			/* no more sent on list */
7754 			printf("Warning, tp1->sent == %d and its now acked?\n",
7755 			    tp1->sent);
7756 		}
7757 		tp2 = TAILQ_NEXT(tp1, sctp_next);
7758 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
7759 		if (tp1->pr_sctp_on) {
7760 			if (asoc->pr_sctp_cnt != 0)
7761 				asoc->pr_sctp_cnt--;
7762 		}
7763 		if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
7764 		    (asoc->total_flight > 0)) {
7765 #ifdef INVARIANTS
7766 			panic("Warning flight size is postive and should be 0");
7767 #else
7768 			SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
7769 			    asoc->total_flight);
7770 #endif
7771 			asoc->total_flight = 0;
7772 		}
7773 		if (tp1->data) {
7774 			/* sa_ignore NO_NULL_CHK */
7775 			sctp_free_bufspace(stcb, asoc, tp1, 1);
7776 			sctp_m_freem(tp1->data);
7777 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
7778 				asoc->sent_queue_cnt_removeable--;
7779 			}
7780 		}
7781 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7782 			sctp_log_sack(asoc->last_acked_seq,
7783 			    cum_ack,
7784 			    tp1->rec.data.TSN_seq,
7785 			    0,
7786 			    0,
7787 			    SCTP_LOG_FREE_SENT);
7788 		}
7789 		tp1->data = NULL;
7790 		asoc->sent_queue_cnt--;
7791 		sctp_free_a_chunk(stcb, tp1);
7792 		wake_him++;
7793 		tp1 = tp2;
7794 	} while (tp1 != NULL);
7795 
7796 done_with_it:
7797 	/* sa_ignore NO_NULL_CHK */
7798 	if ((wake_him) && (stcb->sctp_socket)) {
7799 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7800 		struct socket *so;
7801 
7802 #endif
7803 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
7804 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
7805 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
7806 		}
7807 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7808 		so = SCTP_INP_SO(stcb->sctp_ep);
7809 		atomic_add_int(&stcb->asoc.refcnt, 1);
7810 		SCTP_TCB_UNLOCK(stcb);
7811 		SCTP_SOCKET_LOCK(so, 1);
7812 		SCTP_TCB_LOCK(stcb);
7813 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
7814 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
7815 			/* assoc was freed while we were unlocked */
7816 			SCTP_SOCKET_UNLOCK(so, 1);
7817 			return;
7818 		}
7819 #endif
7820 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
7821 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7822 		SCTP_SOCKET_UNLOCK(so, 1);
7823 #endif
7824 	} else {
7825 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
7826 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
7827 		}
7828 	}
7829 
7830 	if (asoc->fast_retran_loss_recovery && accum_moved) {
7831 		if (compare_with_wrap(asoc->last_acked_seq,
7832 		    asoc->fast_recovery_tsn, MAX_TSN) ||
7833 		    asoc->last_acked_seq == asoc->fast_recovery_tsn) {
7834 			/* Setup so we will exit RFC2582 fast recovery */
7835 			will_exit_fast_recovery = 1;
7836 		}
7837 	}
7838 	/*
7839 	 * Check for revoked fragments:
7840 	 *
7841 	 * If the previous sack had no frags, then we can't have any revoked.
7842 	 * If the previous sack had frags, then:
7843 	 * - If we now have frags (num_seg > 0), call sctp_check_for_revoked()
7844 	 *   to tell if the peer revoked some of them.
7845 	 * - Else, the peer revoked all ACKED fragments, since we had some
7846 	 *   before and now we have NONE.
7847 	 */
7848 	if (num_seg)
7849 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
7850 
7851 	else if (asoc->saw_sack_with_frags) {
7852 		int cnt_revoked = 0;
7853 
7854 		tp1 = TAILQ_FIRST(&asoc->sent_queue);
7855 		if (tp1 != NULL) {
7856 			/* Peer revoked all dg's marked or acked */
7857 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
7858 				/*
7859 				 * EY- maybe check only if it is nr_acked;
7860 				 * nr_marked may not be possible
7861 				 */
7862 				if ((tp1->sent == SCTP_DATAGRAM_NR_ACKED) ||
7863 				    (tp1->sent == SCTP_DATAGRAM_NR_MARKED)) {
7864 					/*
7865 					 * EY! - TODO: Something previously
7866 					 * nr_gapped is reneged, abort the
7867 					 * association
7868 					 */
7869 					return;
7870 				}
7871 				if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
7872 				    (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
7873 					tp1->sent = SCTP_DATAGRAM_SENT;
7874 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7875 						sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
7876 						    tp1->whoTo->flight_size,
7877 						    tp1->book_size,
7878 						    (uintptr_t) tp1->whoTo,
7879 						    tp1->rec.data.TSN_seq);
7880 					}
7881 					sctp_flight_size_increase(tp1);
7882 					sctp_total_flight_increase(stcb, tp1);
7883 					tp1->rec.data.chunk_was_revoked = 1;
7884 					/*
7885 					 * To ensure that this increase in
7886 					 * flightsize, which is artificial,
7887 					 * does not throttle the sender, we
7888 					 * also increase the cwnd
7889 					 * artificially.
7890 					 */
7891 					tp1->whoTo->cwnd += tp1->book_size;
7892 					cnt_revoked++;
7893 				}
7894 			}
7895 			if (cnt_revoked) {
7896 				reneged_all = 1;
7897 			}
7898 		}
7899 		asoc->saw_sack_with_frags = 0;
7900 	}
7901 	if (num_seg)
7902 		asoc->saw_sack_with_frags = 1;
7903 	else
7904 		asoc->saw_sack_with_frags = 0;
7905 
7906 	/* EY! - not sure whether there should be an if here */
7907 	if (num_nr_seg)
7908 		sctp_check_for_nr_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
7909 	else if (asoc->saw_sack_with_nr_frags) {
7910 		/*
7911 		 * EY!- TODO: all previously nr_gapped chunks have been
7912 		 * reneged; abort the association
7913 		 */
7914 		asoc->saw_sack_with_nr_frags = 0;
7915 	}
7916 	if (num_nr_seg)
7917 		asoc->saw_sack_with_nr_frags = 1;
7918 	else
7919 		asoc->saw_sack_with_nr_frags = 0;
7920 	/* JRS - Use the congestion control given in the CC module */
7921 	asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
7922 
7923 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
7924 		/* nothing left in-flight */
7925 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7926 			/* stop all timers */
7927 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
7928 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
7929 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
7930 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
7931 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
7932 				}
7933 			}
7934 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7935 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
7936 			net->flight_size = 0;
7937 			net->partial_bytes_acked = 0;
7938 		}
7939 		asoc->total_flight = 0;
7940 		asoc->total_flight_count = 0;
7941 	}
7942 	/**********************************/
7943 	/* Now what about shutdown issues */
7944 	/**********************************/
7945 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
7946 		/* nothing left on the send queue; consider it done */
7947 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7948 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
7949 			    asoc->peers_rwnd, 0, 0, a_rwnd);
7950 		}
7951 		asoc->peers_rwnd = a_rwnd;
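		/*
		 * Sender-side silly window syndrome avoidance: a peer
		 * window smaller than our SWS threshold is treated as
		 * closed.
		 */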
7952 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7953 			/* SWS sender side engages */
7954 			asoc->peers_rwnd = 0;
7955 		}
7956 		/* clean up */
7957 		if ((asoc->stream_queue_cnt == 1) &&
7958 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
7959 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
7960 		    (asoc->locked_on_sending)
7961 		    ) {
7962 			struct sctp_stream_queue_pending *sp;
7963 
7964 			/*
7965 			 * We may have gotten everything across but cannot
7966 			 * write more due to a shutdown; abort, since the
7967 			 * user did not indicate EOR in this case.
7968 			 */
7969 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
7970 			    sctp_streamhead);
7971 			if ((sp) && (sp->length == 0)) {
7972 				asoc->locked_on_sending = NULL;
7973 				if (sp->msg_is_complete) {
7974 					asoc->stream_queue_cnt--;
7975 				} else {
7976 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
7977 					asoc->stream_queue_cnt--;
7978 				}
7979 			}
7980 		}
7981 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
7982 		    (asoc->stream_queue_cnt == 0)) {
7983 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
7984 				/* Need to abort here */
7985 				struct mbuf *oper;
7986 
7987 		abort_out_now:
7988 				*abort_now = 1;
7989 				/* XXX */
7990 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
7991 				    0, M_DONTWAIT, 1, MT_DATA);
7992 				if (oper) {
7993 					struct sctp_paramhdr *ph;
7994 					uint32_t *ippp;
7995 
7996 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
7997 					    sizeof(uint32_t);
7998 					ph = mtod(oper, struct sctp_paramhdr *);
7999 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
8000 					ph->param_length = htons(SCTP_BUF_LEN(oper));
8001 					ippp = (uint32_t *) (ph + 1);
8002 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
8003 				}
8004 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
8005 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
8006 				return;
8007 			} else {
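				/*
				 * Everything is acked and the queues are
				 * empty: move to SHUTDOWN-SENT, send the
				 * SHUTDOWN, and start the shutdown and
				 * guard timers.
				 */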
8008 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
8009 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
8010 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
8011 				}
8012 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
8013 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
8014 				sctp_stop_timers_for_shutdown(stcb);
8015 				sctp_send_shutdown(stcb,
8016 				    stcb->asoc.primary_destination);
8017 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
8018 				    stcb->sctp_ep, stcb, asoc->primary_destination);
8019 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
8020 				    stcb->sctp_ep, stcb, asoc->primary_destination);
8021 			}
8022 			return;
8023 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
8024 		    (asoc->stream_queue_cnt == 0)) {
8025 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
8026 				goto abort_out_now;
8027 			}
8028 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
8029 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
8030 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
8031 			sctp_send_shutdown_ack(stcb,
8032 			    stcb->asoc.primary_destination);
8033 
8034 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
8035 			    stcb->sctp_ep, stcb, asoc->primary_destination);
8036 			return;
8037 		}
8038 	}
8039 	/*
8040 	 * Now here we are going to recycle net_ack for a different use...
8041 	 * HEADS UP.
8042 	 */
8043 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8044 		net->net_ack = 0;
8045 	}
8046 
8047 	/*
8048 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
8049 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
8050 	 * automatically ensure that.
8051 	 */
8052 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
8053 		this_sack_lowest_newack = cum_ack;
8054 	}
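	/*
	 * Strike (count a miss against) chunks that the gap-ack blocks
	 * show were passed over; chunks struck often enough get marked
	 * for fast retransmission.
	 */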
8055 	if (num_seg > 0) {
8056 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
8057 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
8058 	}
8059 	/* JRS - Use the congestion control given in the CC module */
8060 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
8061 
8062 	/******************************************************************
8063 	 *  Here we handle ECN nonce checking: we check whether the nonce
8064 	 *  sum flag was incorrect or whether resynchronization is needed,
8065 	 *  and if we catch a misbehaving receiver we disable ECN for the
8066 	 *  association.
8067 	 ******************************************************************/
8068 
8069 	if (asoc->ecn_nonce_allowed) {
8070 		if (asoc->nonce_sum_check) {
8071 			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
8072 				if (asoc->nonce_wait_for_ecne == 0) {
8073 					struct sctp_tmit_chunk *lchk;
8074 
8075 					lchk = TAILQ_FIRST(&asoc->send_queue);
8076 					asoc->nonce_wait_for_ecne = 1;
8077 					if (lchk) {
8078 						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
8079 					} else {
8080 						asoc->nonce_wait_tsn = asoc->sending_seq;
8081 					}
8082 				} else {
8083 					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
8084 					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
8085 						/*
8086 						 * Misbehaving peer; disable
8087 						 * ECN for this association.
8088 						 */
8089 						asoc->ecn_allowed = 0;
8090 						asoc->ecn_nonce_allowed = 0;
8091 					}
8092 				}
8093 			}
8094 		} else {
8095 			/* See if Resynchronization Possible */
8096 			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
8097 				asoc->nonce_sum_check = 1;
8098 				/*
8099 				 * Now we must calculate what the base is:
8100 				 * we know the totals for all segments
8101 				 * gap-acked in the SACK (ecn_seg_sums) and
8102 				 * the SACK's nonce sum (nonce_sum_flag), so
8103 				 * a truth table back-calculates the new
8104 				 * asoc->nonce_sum_expect_base:
8105 				 *
8106 				 * SACK-flag   Seg-Sums   Base
8107 				 *     0          0        0
8108 				 *     1          0        1
8109 				 *     0          1        1
8110 				 *     1          1        0
8111 				 */
8112 				asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
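				/*
				 * Worked example: a SACK nonce flag of 0 with
				 * seg sums of 1 back-calculates a base of
				 * 1 ^ 0 = 1, per the table above.
				 */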
8113 			}
8114 		}
8115 	}
8116 	/* Now, are we exiting loss recovery? */
8117 	if (will_exit_fast_recovery) {
8118 		/* Ok, we must exit fast recovery */
8119 		asoc->fast_retran_loss_recovery = 0;
8120 	}
8121 	if ((asoc->sat_t3_loss_recovery) &&
8122 	    ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
8123 	    MAX_TSN) ||
8124 	    (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
8125 		/* end satellite t3 loss recovery */
8126 		asoc->sat_t3_loss_recovery = 0;
8127 	}
8128 	/*
8129 	 * CMT Fast recovery
8130 	 */
8131 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8132 		if (net->will_exit_fast_recovery) {
8133 			/* Ok, we must exit fast recovery */
8134 			net->fast_retran_loss_recovery = 0;
8135 		}
8136 	}
8137 
8138 	/* Adjust and set the new rwnd value */
8139 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
8140 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
8141 		    asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
8142 	}
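	/*
	 * The usable peer window is the advertised window less the bytes
	 * still in flight and a per-chunk overhead allowance for whatever
	 * remains on the sent queue.
	 */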
8143 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
8144 	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
8145 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
8146 		/* SWS sender side engages */
8147 		asoc->peers_rwnd = 0;
8148 	}
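	/*
	 * If the peer's window grew, an outstanding window probe has been
	 * answered; the loop below puts the probe chunk back as sendable.
	 */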
8149 	if (asoc->peers_rwnd > old_rwnd) {
8150 		win_probe_recovery = 1;
8151 	}
8152 	/*
8153 	 * Now we must set up a timer for anyone with
8154 	 * outstanding data.
8155 	 */
8156 	done_once = 0;
8157 again:
8158 	j = 0;
8159 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8160 		if (win_probe_recovery && (net->window_probe)) {
8161 			win_probe_recovered = 1;
8162 			/*-
8163 			 * Find the first chunk that was used for a
8164 			 * window probe and clear the event. Put it
8165 			 * back into the send queue as if it had
8166 			 * not been sent.
8167 			 */
8168 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
8169 				if (tp1->window_probe) {
8170 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
8171 					break;
8172 				}
8173 			}
8174 		}
8175 		if (net->flight_size) {
8176 			j++;
8177 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
8178 			    stcb->sctp_ep, stcb, net);
8179 			if (net->window_probe) {
8180 				net->window_probe = 0;
8181 			}
8182 		} else {
8183 			if (net->window_probe) {
8184 				net->window_probe = 0;
8185 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
8186 				    stcb->sctp_ep, stcb, net);
8187 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8188 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
8189 				    stcb, net,
8190 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
8191 			}
8192 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
8193 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
8194 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
8195 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
8196 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
8197 				}
8198 			}
8199 		}
8200 	}
8201 	if ((j == 0) &&
8202 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
8203 	    (asoc->sent_queue_retran_cnt == 0) &&
8204 	    (win_probe_recovered == 0) &&
8205 	    (done_once == 0)) {
8206 		/*
8207 		 * huh, this should not happen unless all packets are
8208 		 * PR-SCTP and marked to be skipped, of course.
8209 		 */
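		/*
		 * If the audit finds the flight-size bookkeeping
		 * inconsistent, rebuild it from the sent queue and take
		 * one more pass; done_once bounds us to a single retry.
		 */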
8210 		if (sctp_fs_audit(asoc)) {
8211 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8212 				net->flight_size = 0;
8213 			}
8214 			asoc->total_flight = 0;
8215 			asoc->total_flight_count = 0;
8216 			asoc->sent_queue_retran_cnt = 0;
8217 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
8218 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
8219 					sctp_flight_size_increase(tp1);
8220 					sctp_total_flight_increase(stcb, tp1);
8221 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
8222 					asoc->sent_queue_retran_cnt++;
8223 				}
8224 			}
8225 		}
8226 		done_once = 1;
8227 		goto again;
8228 	}
8229 	/*********************************************/
8230 	/* Here we perform PR-SCTP procedures        */
8231 	/* (section 4.2)                             */
8232 	/*********************************************/
8233 	/* C1. update advancedPeerAckPoint */
8234 	if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
8235 		asoc->advanced_peer_ack_point = cum_ack;
8236 	}
8237 	/* C2. try to further move advancedPeerAckPoint ahead */
8238 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
8239 		struct sctp_tmit_chunk *lchk;
8240 		uint32_t old_adv_peer_ack_point;
8241 
8242 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
8243 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
8244 		/* C3. See if we need to send a Fwd-TSN */
8245 		if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
8246 		    MAX_TSN)) {
8247 			/*
8248 			 * ISSUE with ECN: see the FWD-TSN processing for
8249 			 * notes on issues that will occur when ECN nonce
8250 			 * support is added to SCTP for cross-checking.
8251 			 */
8252 			if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
8253 			    MAX_TSN)) {
8254 				send_forward_tsn(stcb, asoc);
8255 				/*
8256 				 * ECN Nonce: Disable Nonce Sum check when
8257 				 * FWD TSN is sent and store resync tsn
8258 				 */
8259 				asoc->nonce_sum_check = 0;
8260 				asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
8261 			} else if (lchk) {
8262 				/* try to fast-retransmit FWD-TSNs that get lost too */
8263 				lchk->rec.data.fwd_tsn_cnt++;
8264 				if (lchk->rec.data.fwd_tsn_cnt > 3) {
8265 					send_forward_tsn(stcb, asoc);
8266 					lchk->rec.data.fwd_tsn_cnt = 0;
8267 				}
8268 			}
8269 		}
8270 		if (lchk) {
8271 			/* Ensure a timer is running */
8272 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
8273 			    stcb->sctp_ep, stcb, lchk->whoTo);
8274 		}
8275 	}
8276 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
8277 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
8278 		    a_rwnd,
8279 		    stcb->asoc.peers_rwnd,
8280 		    stcb->asoc.total_flight,
8281 		    stcb->asoc.total_output_queue_size);
8282 	}
8283 }
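
/*
 * Illustration only (not part of the build): the SACK processing above
 * leans on compare_with_wrap() for "TSN a is newer than TSN b" in 32-bit
 * serial-number arithmetic. One common way to express that test, assuming
 * the usual half-space convention (the function name below is hypothetical):
 *
 *	static int
 *	tsn_newer_example(uint32_t a, uint32_t b)
 *	{
 *		return (((a > b) && ((a - b) < (1U << 31))) ||
 *		    ((a < b) && ((b - a) > (1U << 31))));
 *	}
 *
 * For example, TSN 5 is "newer" than TSN 0xFFFFFFFD because the sequence
 * space has wrapped.
 */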
8284