xref: /freebsd/sys/netinet/sctp_indata.c (revision eb6d21b4ca6d668cf89afd99eef7baeafa712197)
/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>

#define SCTP_CALC_TSN_TO_GAP(gap, tsn, mapping_tsn) do { \
	if ((compare_with_wrap(tsn, mapping_tsn, MAX_TSN)) || \
	    (tsn == mapping_tsn)) { \
		gap = tsn - mapping_tsn; \
	} else { \
		gap = (MAX_TSN - mapping_tsn) + tsn + 1; \
	} \
} while (0)
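
/*
 * Editor's note (illustrative, not part of the original source): the macro
 * above does serial-number arithmetic, so the computed gap is correct even
 * when the 32-bit TSN space wraps.  A minimal worked example with made-up
 * values:
 */
#if 0	/* sketch only, never compiled */
	uint32_t gap, tsn = 0x00000002, base = 0xFFFFFFFDU;

	SCTP_CALC_TSN_TO_GAP(gap, tsn, base);
	/*
	 * compare_with_wrap() treats TSN 0x00000002 as newer than base
	 * 0xFFFFFFFD, so gap = tsn - base = 5 in 32-bit modular arithmetic:
	 * the TSNs base..tsn occupy bit offsets 0..5 of the mapping array.
	 */
#endif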

#define SCTP_REVERSE_OUT_TSN_PRES(nr_gap, tsn, asoc) do { \
	if (asoc->mapping_array_base_tsn == asoc->nr_mapping_array_base_tsn) { \
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, nr_gap); \
	} else { \
		int lgap; \
		SCTP_CALC_TSN_TO_GAP(lgap, tsn, asoc->mapping_array_base_tsn); \
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, lgap); \
	} \
} while (0)

/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it, for bundling).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a one-to-many socket.
	 * Since the sb_cc is the count that everyone has put up, when we
	 * re-write sctp_soreceive we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * Take out what has NOT been put on the socket queue and that we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);

	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0. SWS avoidance engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
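
/*
 * Editor's note (illustrative, with made-up numbers): for a socket with a
 * 64000-byte receive buffer holding nothing, the function above grants the
 * full window, max(64000, SCTP_MINIMAL_RWND).  With, say, 8000 bytes on the
 * reassembly queue, 4000 bytes on the stream queues and 500 bytes of rwnd
 * control overhead, it instead advertises roughly
 *
 *	calc = sbspace(so_rcv) - 8000 - 4000 - 500
 *
 * and clamps the result to 1 once it falls below my_rwnd_control_len, so
 * the window never slams shut entirely (silly-window-syndrome avoidance).
 */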

/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}

/*
 * Build out our readq entry based on the incoming chunk.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}

struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}

	ret = sctp_get_mbuf_for_msg(len,
	    0, M_DONTWAIT, 1, MT_DATA);

	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		memcpy(outinfo, sinfo, len);
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	SCTP_BUF_LEN(ret) = cmh->cmsg_len;
	return (ret);
}
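
/*
 * Editor's sketch (not part of the original source): a user process would
 * typically recover the sctp_sndrcvinfo built above from recvmsg(2) control
 * data using only the standard CMSG accessors:
 */
#if 0	/* sketch only, never compiled */
	struct msghdr msg;		/* filled in by recvmsg(2) */
	struct cmsghdr *c;
	struct sctp_sndrcvinfo info;

	for (c = CMSG_FIRSTHDR(&msg); c != NULL; c = CMSG_NXTHDR(&msg, c)) {
		if (c->cmsg_level == IPPROTO_SCTP &&
		    c->cmsg_type == SCTP_SNDRCV) {
			memcpy(&info, CMSG_DATA(c), sizeof(info));
			/* info.sinfo_stream, info.sinfo_ssn, ... */
		}
	}
#endif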

char *
sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
    int *control_len,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	char *buf;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}
	SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
	if (buf == NULL) {
		/* No space */
		return (buf);
	}
	/* We need a CMSG header followed by the struct */
	cmh = (struct cmsghdr *)buf;
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		memcpy(outinfo, sinfo, len);
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	*control_len = len;
	return (buf);
}

/*
 * We are delivering currently from the reassembly queue. We must continue to
 * deliver until we either: 1) run out of space, 2) run out of sequential
 * TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;

	/* EY: if anything is delivered out-of-order, tag it nr on the nr_map */
	uint32_t nr_tsn, nr_gap;

	struct sctp_queued_to_read *control, *ctl, *ctlat;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket above is long gone or going.. */
abandon:
		asoc->fragmented_delivery_inprogress = 0;
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		while (chk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk);
			/* sa_ignore FREED_MEMORY */
			chk = TAILQ_FIRST(&asoc->reasmqueue);
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	do {
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		if (chk == NULL) {
			return;
		}
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream,
			 * and not marked unordered
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {

			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end,
			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong: either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is an EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was an EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");

#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* pull it we did it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * EY: this is the chunk that should be tagged nr gapped;
		 * calculate the gap and then tag this TSN nr
		 * (chk->rec.data.TSN_seq)
		 */
		/*
		 * EY!-TODO- this tsn should be tagged nr only if it is
		 * out-of-order; the if statement should be modified
		 */
		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {

			nr_tsn = chk->rec.data.TSN_seq;
			SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
			if ((nr_gap >= (uint32_t) (SCTP_NR_MAPPING_ARRAY << 3)) ||
			    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
				/*
				 * EY: the 1st should never happen, as this
				 * check is already done in
				 * sctp_process_a_data_chunk()
				 */
				/*
				 * EY: the 2nd should never happen, because
				 * nr_mapping_array is always expanded when
				 * mapping_array is expanded
				 */
				printf("Impossible nr_gap ack range failed\n");
			} else {
				SCTP_TCB_LOCK_ASSERT(stcb);
				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
				SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
				if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
					asoc->highest_tsn_inside_nr_map = nr_tsn;
			}
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now lets see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			ctl = TAILQ_FIRST(&strm->inqueue);
			if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
				while (ctl != NULL) {
					/* Deliver more if we can. */
					if (nxt_todel == ctl->sinfo_ssn) {
						ctlat = TAILQ_NEXT(ctl, next);
						TAILQ_REMOVE(&strm->inqueue, ctl, next);
						asoc->size_on_all_streams -= ctl->length;
						sctp_ucount_decr(asoc->cnt_on_all_streams);
						strm->last_sequence_delivered++;
						/*
						 * EY: will be used to
						 * calculate nr-gap
						 */
						nr_tsn = ctl->sinfo_tsn;
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    ctl,
						    &stcb->sctp_socket->so_rcv, 1,
						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
						/*
						 * EY: now something is
						 * delivered, calculate
						 * nr_gap and tag this tsn
						 * NR
						 */
						if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
							SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
							if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
							    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
								printf("Impossible NR gap calculation?\n");
								/*
								 * EY: the 1st should never happen,
								 * as this check is already done in
								 * sctp_process_a_data_chunk()
								 */
								/*
								 * EY: the 2nd should never happen,
								 * because nr_mapping_array is always
								 * expanded when mapping_array is
								 * expanded
								 */
							} else {
								SCTP_TCB_LOCK_ASSERT(stcb);
								SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
								SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
								if (compare_with_wrap(nr_tsn,
								    asoc->highest_tsn_inside_nr_map,
								    MAX_TSN))
									asoc->highest_tsn_inside_nr_map = nr_tsn;
							}
						}
						ctl = ctlat;
					} else {
						break;
					}
					nxt_todel = strm->last_sequence_delivered + 1;
				}
			}
			break;
		}
		/* sa_ignore FREED_MEMORY */
		chk = TAILQ_FIRST(&asoc->reasmqueue);
	} while (chk);
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains: what do we do when the socket buffer is FULL?
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 *
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *oper;

	/* EY: will be used to calculate the nr-gap for a tsn */
	uint32_t nr_tsn, nr_gap;

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream,
	    (uint32_t) strm->last_sequence_delivered,
	    (uint32_t) nxt_todel);
	if (compare_with_wrap(strm->last_sequence_delivered,
	    control->sinfo_ssn, MAX_SEQ) ||
	    (strm->last_sequence_delivered == control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (sizeof(uint32_t) * 3);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
			ippp++;
			*ippp = control->sinfo_tsn;
			ippp++;
			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

		*abort_flag = 1;
		return;

	}
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY: it won't be queued if it can be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;
		/* EY: will be used to calculate nr-gap */
		nr_tsn = control->sinfo_tsn;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
		/*
		 * EY: this is the chunk that should be tagged nr gapped;
		 * calculate the gap and then tag this TSN nr
		 * (control->sinfo_tsn)
		 */
		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
			SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
			if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
			    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
				printf("Impossible nr_tsn set 2?\n");
				/*
				 * EY: the 1st should never happen, as this
				 * check is already done in
				 * sctp_process_a_data_chunk()
				 */
				/*
				 * EY: the 2nd should never happen, because
				 * nr_mapping_array is always expanded when
				 * mapping_array is expanded
				 */
			} else {
				SCTP_TCB_LOCK_ASSERT(stcb);
				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
				SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
				if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
					asoc->highest_tsn_inside_nr_map = nr_tsn;
			}
		}
		control = TAILQ_FIRST(&strm->inqueue);
		while (control != NULL) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				at = TAILQ_NEXT(control, next);
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				/* EY: will be used to calculate nr-gap */
				nr_tsn = control->sinfo_tsn;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_NOT_LOCKED);
				/*
				 * EY: this is the chunk that should be
				 * tagged nr gapped; calculate the gap and
				 * then tag this TSN nr
				 * (control->sinfo_tsn)
				 */
				if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
					SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
					if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
					    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
						printf("Impossible nr TSN set 3?\n");
						/*
						 * EY: the 1st should never
						 * happen, as this check is
						 * already done in
						 * sctp_process_a_data_chunk()
						 */
						/*
						 * EY: the 2nd should never
						 * happen, because
						 * nr_mapping_array is
						 * always expanded when
						 * mapping_array is expanded
						 */
					} else {
						SCTP_TCB_LOCK_ASSERT(stcb);
						SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
						SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
						if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map,
						    MAX_TSN))
							asoc->highest_tsn_inside_nr_map = nr_tsn;
					}
				}
				control = at;
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if ((compare_with_wrap(asoc->cumulative_tsn,
		    control->sinfo_tsn, MAX_TSN)) ||
		    (control->sinfo_tsn == asoc->cumulative_tsn)) {
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (compare_with_wrap(at->sinfo_ssn,
				    control->sinfo_ssn, MAX_SEQ)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, he sent me a duplicate str
					 * seq number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSNs have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh for now just blow
					 * away the chunk!
					 */
					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom)
						sctp_free_remote_addr(control->whoFrom);
					control->whoFrom = NULL;
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}

/*
 * Returns two things: you get the total size of the deliverable parts of the
 * first fragmented message on the reassembly queue, and you get a 1 back if
 * all of the message is ready or a 0 back if the message is still incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t *t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	while (chk) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
		chk = TAILQ_NEXT(chk, sctp_next);
	}
	return (0);
}
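
/*
 * Editor's illustration of the walk above (made-up queue): with TSNs
 * 10 (FIRST, 1200 bytes), 11 (MIDDLE, 1200 bytes) and 12 (LAST, 400 bytes)
 * queued, three consecutive TSNs end in a LAST fragment, so the function
 * returns 1 with *t_size = 2800.  If TSN 12 had not arrived yet, the walk
 * would fall off the list and return 0 with *t_size = 2400.
 */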

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize, pd_point;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep, the first one is here and it's ok to deliver
			 * but should we?
			 */
			if (stcb->sctp_socket) {
				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
				    stcb->sctp_ep->partial_delivery_point);
			} else {
				pd_point = stcb->sctp_ep->partial_delivery_point;
			}
			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {

				/*
				 * Yes, we set up to start reception by
				 * backing down the TSN just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery... but it won't know to go
		 * back and call itself again... we do that here with the
		 * goto doit_again
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our fragmented delivery, could be more
			 * waiting?
			 */
			goto doit_again;
		}
	}
}
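
/*
 * Editor's illustration of the pd_point test above (made-up numbers): with
 * a 64000-byte receive buffer and a partial_delivery_point of 4096,
 * pd_point is min(64000, 4096) = 4096.  A fragmented message with 5000
 * deliverable bytes therefore starts a partial delivery even though its
 * LAST fragment is missing, while a 2000-byte prefix stays queued until
 * more fragments land or the whole message is present.
 */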

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so pull it off (or as much as
 * we can). If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
	u_char last_flags;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery in progress;
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);

				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (sizeof(uint32_t) * 3);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment NOT a FIRST
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (3 * sizeof(uint32_t));
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
					    chk->rec.data.stream_number,
					    asoc->str_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (sizeof(uint32_t) * 3);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
				    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq !=
				    asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
					    chk->rec.data.stream_seq,
					    asoc->ssn_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(at->rec.data.TSN_seq,
		    chk->rec.data.TSN_seq, MAX_TSN)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, he sent me a duplicate str seq number */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSNs have wrapped. Maybe I should
			 * compare to TSN somehow... sigh for now just blow
			 * away the chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk);
			return;
		} else {
			last_flags = at->rec.data.rcv_flags;
			last_tsn = at->rec.data.TSN_seq;
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    prev->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct SSN here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    prev->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG ||
			    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    next->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct SSN here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    next->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is not
 * stuffing us full of bad packet fragments. A broken peer could also do this,
 * but that is doubtful. It is too bad I must worry about evil crackers, sigh
 * :< more cycles.
 */
1523 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1524     uint32_t TSN_seq)
1525 {
1526 	struct sctp_tmit_chunk *at;
1527 	uint32_t tsn_est;
1528 
1529 	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1530 		if (compare_with_wrap(TSN_seq,
1531 		    at->rec.data.TSN_seq, MAX_TSN)) {
1532 			/* is it one bigger? */
1533 			tsn_est = at->rec.data.TSN_seq + 1;
1534 			if (tsn_est == TSN_seq) {
1535 				/* yep. It better be a last then */
1536 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1537 				    SCTP_DATA_LAST_FRAG) {
1538 					/*
1539 					 * Ok this guy belongs next to a guy
1540 					 * that is NOT last, it should be a
1541 					 * middle/last, not a complete
1542 					 * chunk.
1543 					 */
1544 					return (1);
1545 				} else {
1546 					/*
1547 					 * This guy is ok since its a LAST
1548 					 * and the new chunk is a fully
1549 					 * self- contained one.
1550 					 */
1551 					return (0);
1552 				}
1553 			}
1554 		} else if (TSN_seq == at->rec.data.TSN_seq) {
1555 			/* Software error since I have a dup? */
1556 			return (1);
1557 		} else {
1558 			/*
1559 			 * Ok, 'at' is larger than the new chunk, but does the
1560 			 * new chunk need to sit right before it?
1561 			 */
1562 			tsn_est = TSN_seq + 1;
1563 			if (tsn_est == at->rec.data.TSN_seq) {
1564 				/* Yep, It better be a first */
1565 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1566 				    SCTP_DATA_FIRST_FRAG) {
1567 					return (1);
1568 				} else {
1569 					return (0);
1570 				}
1571 			}
1572 		}
1573 	}
1574 	return (0);
1575 }
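/*
 * Editorial walk-through (not in the original source): suppose the
 * reassembly queue holds TSN 100 flagged as a MIDDLE fragment. An arriving
 * self-contained chunk with TSN 101 sits right after a fragment that is not
 * a LAST, so it must really be part of that message and the routine returns
 * 1 (it "belongs" to reassembly). If TSN 100 were a LAST fragment instead,
 * TSN 101 could legitimately start a new message and the routine returns 0.
 */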
1576 
1577 
1578 static int
1579 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1580     struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1581     struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1582     int *break_flag, int last_chunk)
1583 {
1584 	/* Process a data chunk */
1586 	struct sctp_tmit_chunk *chk;
1587 	uint32_t tsn, gap;
1588 
1589 	/* EY - for nr_sack */
1590 	uint32_t nr_gap;
1591 	struct mbuf *dmbuf;
1592 	int indx, the_len;
1593 	int need_reasm_check = 0;
1594 	uint16_t strmno, strmseq;
1595 	struct mbuf *oper;
1596 	struct sctp_queued_to_read *control;
1597 	int ordered;
1598 	uint32_t protocol_id;
1599 	uint8_t chunk_flags;
1600 	struct sctp_stream_reset_list *liste;
1601 
1602 	chk = NULL;
1603 	tsn = ntohl(ch->dp.tsn);
1604 	chunk_flags = ch->ch.chunk_flags;
1605 	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1606 		asoc->send_sack = 1;
1607 	}
1608 	protocol_id = ch->dp.protocol_id;
1609 	ordered = ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0);
1610 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1611 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1612 	}
1613 	if (stcb == NULL) {
1614 		return (0);
1615 	}
1616 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1617 	if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
1618 	    asoc->cumulative_tsn == tsn) {
1619 		/* It is a duplicate */
1620 		SCTP_STAT_INCR(sctps_recvdupdata);
1621 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1622 			/* Record a dup for the next outbound sack */
1623 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1624 			asoc->numduptsns++;
1625 		}
1626 		asoc->send_sack = 1;
1627 		return (0);
1628 	}
1629 	/* Calculate the number of TSNs between the base and this TSN */
1630 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
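	/*
	 * Worked example (editorial): with mapping_array_base_tsn == 1000,
	 * tsn == 1005 gives gap == 5. Across the 32-bit wrap, base ==
	 * 0xFFFFFFFE and tsn == 3 gives gap == (MAX_TSN - base) + tsn + 1 ==
	 * 1 + 3 + 1 == 5 as well.
	 */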
1631 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1632 		/* Can't hold this bit even in a maximum-size mapping array; toss it */
1633 		return (0);
1634 	}
1635 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1636 		SCTP_TCB_LOCK_ASSERT(stcb);
1637 		if (sctp_expand_mapping_array(asoc, gap)) {
1638 			/* Can't expand, drop it */
1639 			return (0);
1640 		}
1641 	}
1642 	/* EY - for nr_sack */
1643 	nr_gap = gap;
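	/*
	 * Editorial note: nr_gap is the same offset, but into the
	 * nr_mapping_array, which (when NR-SACK is negotiated) tracks the
	 * TSNs this receiver will not renege on because they have already
	 * been handed to the socket.
	 */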
1644 
1645 	if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
1646 		*high_tsn = tsn;
1647 	}
1648 	/* See if we have received this one already */
1649 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
1650 		SCTP_STAT_INCR(sctps_recvdupdata);
1651 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1652 			/* Record a dup for the next outbound sack */
1653 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1654 			asoc->numduptsns++;
1655 		}
1656 		asoc->send_sack = 1;
1657 		return (0);
1658 	}
1659 	/*
1660 	 * Check the GONE flag; duplicates would already have caused a SACK
1661 	 * to be sent above.
1662 	 */
1663 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1664 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1665 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1666 	    ) {
1667 		/*
1668 		 * Wait a minute, the socket is gone; there is no longer a
1669 		 * receiver. Send the peer an ABORT!
1670 		 */
1671 		struct mbuf *op_err;
1672 
1673 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1674 		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
1675 		*abort_flag = 1;
1676 		return (0);
1677 	}
1678 	/*
1679 	 * Now before going further we see if there is room. If NOT then we
1680 	 * MAY let one through only IF this TSN is the one we are waiting
1681 	 * for on a partial delivery API.
1682 	 */
1683 
1684 	/* now do the tests */
1685 	if (((asoc->cnt_on_all_streams +
1686 	    asoc->cnt_on_reasm_queue +
1687 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1688 	    (((int)asoc->my_rwnd) <= 0)) {
1689 		/*
1690 		 * When we have NO room in the rwnd we check to make sure
1691 		 * the reader is doing its job...
1692 		 */
1693 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1694 			/* some to read, wake-up */
1695 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1696 			struct socket *so;
1697 
1698 			so = SCTP_INP_SO(stcb->sctp_ep);
1699 			atomic_add_int(&stcb->asoc.refcnt, 1);
1700 			SCTP_TCB_UNLOCK(stcb);
1701 			SCTP_SOCKET_LOCK(so, 1);
1702 			SCTP_TCB_LOCK(stcb);
1703 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1704 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1705 				/* assoc was freed while we were unlocked */
1706 				SCTP_SOCKET_UNLOCK(so, 1);
1707 				return (0);
1708 			}
1709 #endif
1710 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1711 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1712 			SCTP_SOCKET_UNLOCK(so, 1);
1713 #endif
1714 		}
1715 		/* now is it in the mapping array of what we have accepted? */
1716 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1717 			/* Nope, not in the valid range; dump it */
1718 			sctp_set_rwnd(stcb, asoc);
1719 			if ((asoc->cnt_on_all_streams +
1720 			    asoc->cnt_on_reasm_queue +
1721 			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1722 				SCTP_STAT_INCR(sctps_datadropchklmt);
1723 			} else {
1724 				SCTP_STAT_INCR(sctps_datadroprwnd);
1725 			}
1726 			indx = *break_flag;
1727 			*break_flag = 1;
1728 			return (0);
1729 		}
1730 	}
1731 	strmno = ntohs(ch->dp.stream_id);
1732 	if (strmno >= asoc->streamincnt) {
1733 		struct sctp_paramhdr *phdr;
1734 		struct mbuf *mb;
1735 
1736 		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1737 		    0, M_DONTWAIT, 1, MT_DATA);
1738 		if (mb != NULL) {
1739 			/* add some space up front so prepend will work well */
1740 			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1741 			phdr = mtod(mb, struct sctp_paramhdr *);
1742 			/*
1743 			 * Error causes are just parameters; this one has two
1744 			 * back-to-back paramhdrs: one with the error type and
1745 			 * size, the other with the stream id and a reserved field.
1746 			 */
1747 			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1748 			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1749 			phdr->param_length =
1750 			    htons(sizeof(struct sctp_paramhdr) * 2);
1751 			phdr++;
1752 			/* We insert the stream in the type field */
1753 			phdr->param_type = ch->dp.stream_id;
1754 			/* And set the length to 0 for the rsvd field */
1755 			phdr->param_length = 0;
1756 			sctp_queue_op_err(stcb, mb);
1757 		}
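		/*
		 * Editorial sketch of the cause built above: 8 bytes total,
		 * a paramhdr { SCTP_CAUSE_INVALID_STREAM, length 8 } followed
		 * by a second paramhdr reused to carry the offending stream
		 * id and a zero reserved field.
		 */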
1758 		SCTP_STAT_INCR(sctps_badsid);
1759 		SCTP_TCB_LOCK_ASSERT(stcb);
1760 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1761 		/* EY set this tsn present in nr_sack's nr_mapping_array */
1762 		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
1763 			SCTP_TCB_LOCK_ASSERT(stcb);
1764 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1765 			SCTP_REVERSE_OUT_TSN_PRES(gap, tsn, asoc);
1766 		}
1767 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1768 			/* we have a new high score */
1769 			asoc->highest_tsn_inside_map = tsn;
1770 			/* EY nr_sack version of the above */
1771 			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack)
1772 				asoc->highest_tsn_inside_nr_map = tsn;
1773 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1774 				sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
1775 			}
1776 		}
1777 		if (tsn == (asoc->cumulative_tsn + 1)) {
1778 			/* Update cum-ack */
1779 			asoc->cumulative_tsn = tsn;
1780 		}
1781 		return (0);
1782 	}
1783 	/*
1784 	 * Before we continue, let's validate that we are not being fooled by
1785 	 * an evil attacker. We can only have 4k chunks outstanding based on
1786 	 * the TSN spread allowed by the mapping array (512 * 8 bits), so there
1787 	 * is no way our stream sequence numbers could have wrapped. We of
1788 	 * course only validate the FIRST fragment, so that bit must be set.
1789 	 */
1790 	strmseq = ntohs(ch->dp.stream_sequence);
1791 #ifdef SCTP_ASOCLOG_OF_TSNS
1792 	SCTP_TCB_LOCK_ASSERT(stcb);
1793 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1794 		asoc->tsn_in_at = 0;
1795 		asoc->tsn_in_wrapped = 1;
1796 	}
1797 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1798 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1799 	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1800 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1801 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1802 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1803 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1804 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1805 	asoc->tsn_in_at++;
1806 #endif
1807 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1808 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1809 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1810 	    (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
1811 	    strmseq, MAX_SEQ) ||
1812 	    asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
1813 		/* The incoming sseq is behind where we last delivered? */
1814 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1815 		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1816 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1817 		    0, M_DONTWAIT, 1, MT_DATA);
1818 		if (oper) {
1819 			struct sctp_paramhdr *ph;
1820 			uint32_t *ippp;
1821 
1822 			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1823 			    (3 * sizeof(uint32_t));
1824 			ph = mtod(oper, struct sctp_paramhdr *);
1825 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1826 			ph->param_length = htons(SCTP_BUF_LEN(oper));
1827 			ippp = (uint32_t *) (ph + 1);
1828 			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1829 			ippp++;
1830 			*ippp = tsn;
1831 			ippp++;
1832 			*ippp = ((strmno << 16) | strmseq);
1833 
1834 		}
1835 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1836 		sctp_abort_an_association(stcb->sctp_ep, stcb,
1837 		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1838 		*abort_flag = 1;
1839 		return (0);
1840 	}
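	/*
	 * Editorial note: the three uint32_t words packed into the
	 * operational error above encode, in order, the internal abort
	 * location, the offending TSN, and ((stream number << 16) | stream
	 * sequence); e.g. stream 3, sequence 7 encodes as 0x00030007.
	 */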
1841 	/************************************
1842 	 * From here down we may find ch-> invalid,
1843 	 * so it is a good idea NOT to use it.
1844 	 *************************************/
1845 
1846 	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1847 	if (last_chunk == 0) {
1848 		dmbuf = SCTP_M_COPYM(*m,
1849 		    (offset + sizeof(struct sctp_data_chunk)),
1850 		    the_len, M_DONTWAIT);
1851 #ifdef SCTP_MBUF_LOGGING
1852 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1853 			struct mbuf *mat;
1854 
1855 			mat = dmbuf;
1856 			while (mat) {
1857 				if (SCTP_BUF_IS_EXTENDED(mat)) {
1858 					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1859 				}
1860 				mat = SCTP_BUF_NEXT(mat);
1861 			}
1862 		}
1863 #endif
1864 	} else {
1865 		/* We can steal the last chunk */
1866 		int l_len;
1867 
1868 		dmbuf = *m;
1869 		/* lop off the top part */
1870 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1871 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1872 			l_len = SCTP_BUF_LEN(dmbuf);
1873 		} else {
1874 			/*
1875 			 * need to count up the size; hopefully we do not
1876 			 * hit this too often :-0
1877 			 */
1878 			struct mbuf *lat;
1879 
1880 			l_len = 0;
1881 			lat = dmbuf;
1882 			while (lat) {
1883 				l_len += SCTP_BUF_LEN(lat);
1884 				lat = SCTP_BUF_NEXT(lat);
1885 			}
1886 		}
1887 		if (l_len > the_len) {
1888 			/* Trim the extra bytes off the end too */
1889 			m_adj(dmbuf, -(l_len - the_len));
1890 		}
1891 	}
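	/*
	 * Editorial note on m_adj() above: a positive length trims bytes
	 * from the head of the mbuf chain and a negative length trims
	 * |len| bytes from the tail, so the first call strips everything
	 * up to the payload and the second drops any bytes beyond the_len.
	 */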
1892 	if (dmbuf == NULL) {
1893 		SCTP_STAT_INCR(sctps_nomem);
1894 		return (0);
1895 	}
1896 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1897 	    asoc->fragmented_delivery_inprogress == 0 &&
1898 	    TAILQ_EMPTY(&asoc->resetHead) &&
1899 	    ((ordered == 0) ||
1900 	    ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1901 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1902 		/* Candidate for express delivery */
1903 		/*
1904 		 * It's not fragmented, no PD-API is up, nothing is in the
1905 		 * delivery queue, it's un-ordered OR ordered and the next to
1906 		 * deliver AND nothing else is stuck on the stream queue,
1907 		 * and there is room for it in the socket buffer. Let's just
1908 		 * stuff it up the buffer....
1909 		 */
1910 
1911 		/* It would be nice to avoid this copy if we could :< */
1912 		sctp_alloc_a_readq(stcb, control);
1913 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1914 		    protocol_id,
1915 		    stcb->asoc.context,
1916 		    strmno, strmseq,
1917 		    chunk_flags,
1918 		    dmbuf);
1919 		if (control == NULL) {
1920 			goto failed_express_del;
1921 		}
1922 		sctp_add_to_readq(stcb->sctp_ep, stcb,
1923 		    control, &stcb->sctp_socket->so_rcv,
1924 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1925 
1926 		/*
1927 		 * EY here we should check whether this delivered tsn is
1928 		 * out of order, and if so update the nr_map.
1929 		 */
1930 		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
1931 			/*
1932 			 * EY check if the mapping_array and nr_mapping
1933 			 * array are consistent
1934 			 */
1935 			if (asoc->mapping_array_base_tsn != asoc->nr_mapping_array_base_tsn) {
1936 				/*
1937 				 * The map and nr_map base TSNs should always be
1938 				 * equal; disabled EY debugging output used to
1939 				 * report the mismatch and dump the nr_gap
1940 				 * inputs here.
1941 				 */
1942 			}
1961 			/* EY - not 100% sure about the lock thing */
1962 			SCTP_TCB_LOCK_ASSERT(stcb);
1963 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
1964 			SCTP_REVERSE_OUT_TSN_PRES(nr_gap, tsn, asoc);
1965 			if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
1966 				asoc->highest_tsn_inside_nr_map = tsn;
1967 		}
1968 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1969 			/* for ordered, bump what we delivered */
1970 			asoc->strmin[strmno].last_sequence_delivered++;
1971 		}
1972 		SCTP_STAT_INCR(sctps_recvexpress);
1973 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1974 			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1975 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1976 		}
1977 		control = NULL;
1978 		goto finish_express_del;
1979 	}
1980 failed_express_del:
1981 	/* If we reach here this is a new chunk */
1982 	chk = NULL;
1983 	control = NULL;
1984 	/* Express for fragmented delivery? */
1985 	if ((asoc->fragmented_delivery_inprogress) &&
1986 	    (stcb->asoc.control_pdapi) &&
1987 	    (asoc->str_of_pdapi == strmno) &&
1988 	    (asoc->ssn_of_pdapi == strmseq)
1989 	    ) {
1990 		control = stcb->asoc.control_pdapi;
1991 		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1992 			/* Can't be another first? */
1993 			goto failed_pdapi_express_del;
1994 		}
1995 		if (tsn == (control->sinfo_tsn + 1)) {
1996 			/* Yep, we can add it on */
1997 			int end = 0;
1998 			uint32_t cumack;
1999 
2000 			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
2001 				end = 1;
2002 			}
2003 			cumack = asoc->cumulative_tsn;
2004 			if ((cumack + 1) == tsn)
2005 				cumack = tsn;
2006 
2007 			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
2008 			    tsn,
2009 			    &stcb->sctp_socket->so_rcv)) {
2010 				SCTP_PRINTF("Append fails end:%d\n", end);
2011 				goto failed_pdapi_express_del;
2012 			}
2013 			/*
2014 			 * EY It was appended to the read queue in the previous
2015 			 * if block; here we should check whether this delivered
2016 			 * tsn is out of order, and if so update the nr_map.
2017 			 */
2018 			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2019 				/*
2020 				 * Disabled EY debugging output used to dump
2021 				 * the nr_gap calculation inputs here.
2022 				 */
2039 				/* EY - not 100% sure about the lock thing */
2040 				SCTP_TCB_LOCK_ASSERT(stcb);
2041 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
2042 				SCTP_REVERSE_OUT_TSN_PRES(nr_gap, tsn, asoc);
2043 				if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
2044 					asoc->highest_tsn_inside_nr_map = tsn;
2045 			}
2046 			SCTP_STAT_INCR(sctps_recvexpressm);
2047 			control->sinfo_tsn = tsn;
2048 			asoc->tsn_last_delivered = tsn;
2049 			asoc->fragment_flags = chunk_flags;
2050 			asoc->tsn_of_pdapi_last_delivered = tsn;
2051 			asoc->last_flags_delivered = chunk_flags;
2052 			asoc->last_strm_seq_delivered = strmseq;
2053 			asoc->last_strm_no_delivered = strmno;
2054 			if (end) {
2055 				/* clean up the flags and such */
2056 				asoc->fragmented_delivery_inprogress = 0;
2057 				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
2058 					asoc->strmin[strmno].last_sequence_delivered++;
2059 				}
2060 				stcb->asoc.control_pdapi = NULL;
2061 				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
2062 					/*
2063 					 * There could be another message
2064 					 * ready
2065 					 */
2066 					need_reasm_check = 1;
2067 				}
2068 			}
2069 			control = NULL;
2070 			goto finish_express_del;
2071 		}
2072 	}
2073 failed_pdapi_express_del:
2074 	control = NULL;
2075 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2076 		sctp_alloc_a_chunk(stcb, chk);
2077 		if (chk == NULL) {
2078 			/* No memory so we drop the chunk */
2079 			SCTP_STAT_INCR(sctps_nomem);
2080 			if (last_chunk == 0) {
2081 				/* we copied it, free the copy */
2082 				sctp_m_freem(dmbuf);
2083 			}
2084 			return (0);
2085 		}
2086 		chk->rec.data.TSN_seq = tsn;
2087 		chk->no_fr_allowed = 0;
2088 		chk->rec.data.stream_seq = strmseq;
2089 		chk->rec.data.stream_number = strmno;
2090 		chk->rec.data.payloadtype = protocol_id;
2091 		chk->rec.data.context = stcb->asoc.context;
2092 		chk->rec.data.doing_fast_retransmit = 0;
2093 		chk->rec.data.rcv_flags = chunk_flags;
2094 		chk->asoc = asoc;
2095 		chk->send_size = the_len;
2096 		chk->whoTo = net;
2097 		atomic_add_int(&net->ref_count, 1);
2098 		chk->data = dmbuf;
2099 	} else {
2100 		sctp_alloc_a_readq(stcb, control);
2101 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2102 		    protocol_id,
2103 		    stcb->asoc.context,
2104 		    strmno, strmseq,
2105 		    chunk_flags,
2106 		    dmbuf);
2107 		if (control == NULL) {
2108 			/* No memory so we drop the chunk */
2109 			SCTP_STAT_INCR(sctps_nomem);
2110 			if (last_chunk == 0) {
2111 				/* we copied it, free the copy */
2112 				sctp_m_freem(dmbuf);
2113 			}
2114 			return (0);
2115 		}
2116 		control->length = the_len;
2117 	}
2118 
2119 	/* Mark it as received */
2120 	/* Now queue it where it belongs */
2121 	if (control != NULL) {
2122 		/* First a sanity check */
2123 		if (asoc->fragmented_delivery_inprogress) {
2124 			/*
2125 			 * Ok, we have a fragmented delivery in progress; if
2126 			 * this chunk is next to deliver OR belongs, in our
2127 			 * view, in the reassembly queue, the peer is evil or
2128 			 * broken.
2129 			 */
2130 			uint32_t estimate_tsn;
2131 
2132 			estimate_tsn = asoc->tsn_last_delivered + 1;
2133 			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
2134 			    (estimate_tsn == control->sinfo_tsn)) {
2135 				/* Evil/Broken peer */
2136 				sctp_m_freem(control->data);
2137 				control->data = NULL;
2138 				if (control->whoFrom) {
2139 					sctp_free_remote_addr(control->whoFrom);
2140 					control->whoFrom = NULL;
2141 				}
2142 				sctp_free_a_readq(stcb, control);
2143 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2144 				    0, M_DONTWAIT, 1, MT_DATA);
2145 				if (oper) {
2146 					struct sctp_paramhdr *ph;
2147 					uint32_t *ippp;
2148 
2149 					SCTP_BUF_LEN(oper) =
2150 					    sizeof(struct sctp_paramhdr) +
2151 					    (3 * sizeof(uint32_t));
2152 					ph = mtod(oper, struct sctp_paramhdr *);
2153 					ph->param_type =
2154 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2155 					ph->param_length = htons(SCTP_BUF_LEN(oper));
2156 					ippp = (uint32_t *) (ph + 1);
2157 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
2158 					ippp++;
2159 					*ippp = tsn;
2160 					ippp++;
2161 					*ippp = ((strmno << 16) | strmseq);
2162 				}
2163 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
2164 				sctp_abort_an_association(stcb->sctp_ep, stcb,
2165 				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2166 
2167 				*abort_flag = 1;
2168 				return (0);
2169 			} else {
2170 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2171 					sctp_m_freem(control->data);
2172 					control->data = NULL;
2173 					if (control->whoFrom) {
2174 						sctp_free_remote_addr(control->whoFrom);
2175 						control->whoFrom = NULL;
2176 					}
2177 					sctp_free_a_readq(stcb, control);
2178 
2179 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2180 					    0, M_DONTWAIT, 1, MT_DATA);
2181 					if (oper) {
2182 						struct sctp_paramhdr *ph;
2183 						uint32_t *ippp;
2184 
2185 						SCTP_BUF_LEN(oper) =
2186 						    sizeof(struct sctp_paramhdr) +
2187 						    (3 * sizeof(uint32_t));
2188 						ph = mtod(oper,
2189 						    struct sctp_paramhdr *);
2190 						ph->param_type =
2191 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2192 						ph->param_length =
2193 						    htons(SCTP_BUF_LEN(oper));
2194 						ippp = (uint32_t *) (ph + 1);
2195 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
2196 						ippp++;
2197 						*ippp = tsn;
2198 						ippp++;
2199 						*ippp = ((strmno << 16) | strmseq);
2200 					}
2201 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2202 					sctp_abort_an_association(stcb->sctp_ep,
2203 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2204 
2205 					*abort_flag = 1;
2206 					return (0);
2207 				}
2208 			}
2209 		} else {
2210 			/* No PDAPI running */
2211 			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
2212 				/*
2213 				 * Reassembly queue is NOT empty; validate
2214 				 * that this tsn does not need to be in the
2215 				 * reassembly queue. If it does, then our peer
2216 				 * is broken or evil.
2217 				 */
2218 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2219 					sctp_m_freem(control->data);
2220 					control->data = NULL;
2221 					if (control->whoFrom) {
2222 						sctp_free_remote_addr(control->whoFrom);
2223 						control->whoFrom = NULL;
2224 					}
2225 					sctp_free_a_readq(stcb, control);
2226 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2227 					    0, M_DONTWAIT, 1, MT_DATA);
2228 					if (oper) {
2229 						struct sctp_paramhdr *ph;
2230 						uint32_t *ippp;
2231 
2232 						SCTP_BUF_LEN(oper) =
2233 						    sizeof(struct sctp_paramhdr) +
2234 						    (3 * sizeof(uint32_t));
2235 						ph = mtod(oper,
2236 						    struct sctp_paramhdr *);
2237 						ph->param_type =
2238 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2239 						ph->param_length =
2240 						    htons(SCTP_BUF_LEN(oper));
2241 						ippp = (uint32_t *) (ph + 1);
2242 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2243 						ippp++;
2244 						*ippp = tsn;
2245 						ippp++;
2246 						*ippp = ((strmno << 16) | strmseq);
2247 					}
2248 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2249 					sctp_abort_an_association(stcb->sctp_ep,
2250 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2251 
2252 					*abort_flag = 1;
2253 					return (0);
2254 				}
2255 			}
2256 		}
2257 		/* ok, if we reach here we have passed the sanity checks */
2258 		if (chunk_flags & SCTP_DATA_UNORDERED) {
2259 			/* queue directly into socket buffer */
2260 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2261 			    control,
2262 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2263 
2264 			/*
2265 			 * EY It was added to the read queue in the previous if
2266 			 * block; here we should check whether this delivered
2267 			 * tsn is out of order, and if so update the nr_map.
2268 			 */
2269 			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2270 				/*
2271 				 * EY check if the mapping_array and
2272 				 * nr_mapping array are consistent
2273 				 */
2274 				if (asoc->mapping_array_base_tsn != asoc->nr_mapping_array_base_tsn) {
2275 					/*
2276 					 * The map and nr_map base TSNs should
2277 					 * always be equal; disabled EY debugging
2278 					 * output used to report the mismatch and
2279 					 * dump the nr_gap inputs here. (EY was
2280 					 * not 100% sure the TCB lock assert was
2281 					 * needed on this path.)
2282 					 */
2283 				}
2307 				SCTP_TCB_LOCK_ASSERT(stcb);
2308 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
2309 				SCTP_REVERSE_OUT_TSN_PRES(nr_gap, tsn, asoc);
2310 				if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
2311 					asoc->highest_tsn_inside_nr_map = tsn;
2312 			}
2313 		} else {
2314 			/*
2315 			 * Special check for when streams are resetting. We
2316 			 * could be smarter about this and check the actual
2317 			 * stream to see if it is not being reset... that way
2318 			 * we would not create head-of-line blocking among
2319 			 * streams being reset and those not being reset.
2320 			 *
2321 			 * We take complete messages that have a stream reset
2322 			 * intervening (i.e. the TSN is after where our
2323 			 * cum-ack needs to be) off and put them on a
2324 			 * pending_reply_queue. The reassembly ones we do
2325 			 * not have to worry about since they are all sorted
2326 			 * and processed in TSN order. It is only the
2327 			 * singletons we must worry about.
2328 			 */
2329 			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2330 			    ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)))
2331 			    ) {
2332 				/*
2333 				 * yep, it's past where we need to reset... go
2334 				 * ahead and queue it.
2335 				 */
2336 				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2337 					/* first one on */
2338 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2339 				} else {
2340 					struct sctp_queued_to_read *ctlOn;
2341 					unsigned char inserted = 0;
2342 
2343 					ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
2344 					while (ctlOn) {
2345 						if (compare_with_wrap(control->sinfo_tsn,
2346 						    ctlOn->sinfo_tsn, MAX_TSN)) {
2347 							ctlOn = TAILQ_NEXT(ctlOn, next);
2348 						} else {
2349 							/* found it */
2350 							TAILQ_INSERT_BEFORE(ctlOn, control, next);
2351 							inserted = 1;
2352 							break;
2353 						}
2354 					}
2355 					if (inserted == 0) {
2356 						/*
2357 						 * not inserted before any
2358 						 * existing entry, so it must
2359 						 * be put at the end.
2360 						 */
2361 						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2362 					}
2363 				}
2364 			} else {
2365 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2366 				if (*abort_flag) {
2367 					return (0);
2368 				}
2369 			}
2370 		}
2371 	} else {
2372 		/* Into the re-assembly queue */
2373 		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2374 		if (*abort_flag) {
2375 			/*
2376 			 * the assoc is now gone and chk was put onto the
2377 			 * reasm queue, which has all been freed.
2378 			 */
2379 			*m = NULL;
2380 			return (0);
2381 		}
2382 	}
2383 finish_express_del:
2384 	if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
2385 		/* we have a new high score */
2386 		asoc->highest_tsn_inside_map = tsn;
2387 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2388 			sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2389 		}
2390 	}
2391 	if (tsn == (asoc->cumulative_tsn + 1)) {
2392 		/* Update cum-ack */
2393 		asoc->cumulative_tsn = tsn;
2394 	}
2395 	if (last_chunk) {
2396 		*m = NULL;
2397 	}
2398 	if (ordered) {
2399 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2400 	} else {
2401 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2402 	}
2403 	SCTP_STAT_INCR(sctps_recvdata);
2404 	/* Set it present please */
2405 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2406 		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2407 	}
2408 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2409 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2410 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2411 	}
2412 	SCTP_TCB_LOCK_ASSERT(stcb);
2413 	SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2414 
2415 	if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
2416 	    asoc->peer_supports_nr_sack &&
2417 	    (SCTP_BASE_SYSCTL(sctp_do_drain) == 0)) {
2418 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2419 		SCTP_REVERSE_OUT_TSN_PRES(nr_gap, tsn, asoc);
2420 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
2421 			asoc->highest_tsn_inside_nr_map = tsn;
2422 		}
2423 	}
2424 	/* check the special flag for stream resets */
2425 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2426 	    ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
2427 	    (asoc->cumulative_tsn == liste->tsn))
2428 	    ) {
2429 		/*
2430 		 * We have finished working through the backlogged TSNs; now
2431 		 * it is time to reset streams. 1: call the reset function.
2432 		 * 2: free the pending_reply space. 3: distribute any chunks
2433 		 * in the pending_reply_queue.
2434 		 */
2435 		struct sctp_queued_to_read *ctl;
2436 
2437 		sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2438 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2439 		SCTP_FREE(liste, SCTP_M_STRESET);
2440 		/* sa_ignore FREED_MEMORY */
2441 		liste = TAILQ_FIRST(&asoc->resetHead);
2442 		ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2443 		if (ctl && (liste == NULL)) {
2444 			/* All can be removed */
2445 			while (ctl) {
2446 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2447 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2448 				if (*abort_flag) {
2449 					return (0);
2450 				}
2451 				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2452 			}
2453 		} else if (ctl) {
2454 			/* more stream resets are still pending */
2455 			while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
2456 				/*
2457 				 * if ctl->sinfo_tsn is <= liste->tsn we can
2458 				 * process it which is the NOT of
2459 				 * ctl->sinfo_tsn > liste->tsn
2460 				 */
2461 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2462 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2463 				if (*abort_flag) {
2464 					return (0);
2465 				}
2466 				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2467 			}
2468 		}
2469 		/*
2470 		 * Now service reassembly to pick up anything that has been
2471 		 * held on the reassembly queue.
2472 		 */
2473 		sctp_deliver_reasm_check(stcb, asoc);
2474 		need_reasm_check = 0;
2475 	}
2476 	if (need_reasm_check) {
2477 		/* Another one waits? */
2478 		sctp_deliver_reasm_check(stcb, asoc);
2479 	}
2480 	return (1);
2481 }
2482 
2483 int8_t sctp_map_lookup_tab[256] = {
2484 	-1, 0, -1, 1, -1, 0, -1, 2,
2485 	-1, 0, -1, 1, -1, 0, -1, 3,
2486 	-1, 0, -1, 1, -1, 0, -1, 2,
2487 	-1, 0, -1, 1, -1, 0, -1, 4,
2488 	-1, 0, -1, 1, -1, 0, -1, 2,
2489 	-1, 0, -1, 1, -1, 0, -1, 3,
2490 	-1, 0, -1, 1, -1, 0, -1, 2,
2491 	-1, 0, -1, 1, -1, 0, -1, 5,
2492 	-1, 0, -1, 1, -1, 0, -1, 2,
2493 	-1, 0, -1, 1, -1, 0, -1, 3,
2494 	-1, 0, -1, 1, -1, 0, -1, 2,
2495 	-1, 0, -1, 1, -1, 0, -1, 4,
2496 	-1, 0, -1, 1, -1, 0, -1, 2,
2497 	-1, 0, -1, 1, -1, 0, -1, 3,
2498 	-1, 0, -1, 1, -1, 0, -1, 2,
2499 	-1, 0, -1, 1, -1, 0, -1, 6,
2500 	-1, 0, -1, 1, -1, 0, -1, 2,
2501 	-1, 0, -1, 1, -1, 0, -1, 3,
2502 	-1, 0, -1, 1, -1, 0, -1, 2,
2503 	-1, 0, -1, 1, -1, 0, -1, 4,
2504 	-1, 0, -1, 1, -1, 0, -1, 2,
2505 	-1, 0, -1, 1, -1, 0, -1, 3,
2506 	-1, 0, -1, 1, -1, 0, -1, 2,
2507 	-1, 0, -1, 1, -1, 0, -1, 5,
2508 	-1, 0, -1, 1, -1, 0, -1, 2,
2509 	-1, 0, -1, 1, -1, 0, -1, 3,
2510 	-1, 0, -1, 1, -1, 0, -1, 2,
2511 	-1, 0, -1, 1, -1, 0, -1, 4,
2512 	-1, 0, -1, 1, -1, 0, -1, 2,
2513 	-1, 0, -1, 1, -1, 0, -1, 3,
2514 	-1, 0, -1, 1, -1, 0, -1, 2,
2515 	-1, 0, -1, 1, -1, 0, -1, 7,
2516 };
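/*
 * Editorial note: sctp_map_lookup_tab[b] is (number of consecutive 1-bits
 * in b, starting from bit 0) - 1. For example tab[0x00] == -1 (no trailing
 * ones), tab[0x01] == 0, tab[0x07] == 2, and tab[0xFF] == 7, although the
 * all-ones byte is special-cased to advance by a full 8 in the scan below.
 */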
2517 
2518 
2519 void
2520 sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag)
2521 {
2522 	/*
2523 	 * Now we also need to check the mapping array in a couple of ways.
2524 	 * 1) Did we move the cum-ack point?
2525 	 */
2526 	struct sctp_association *asoc;
2527 	int at;
2528 	uint8_t comb_byte;
2529 	int last_all_ones = 0;
2530 	int slide_from, slide_end, lgap, distance;
2531 
2536 
2537 	uint32_t old_cumack, old_base, old_highest;
2538 	unsigned char aux_array[64];
2539 
2540 	/*
2541 	 * EY! Don't think this is required, but I am imitating the code for
2542 	 * the map just to make sure.
2543 	 */
2544 	unsigned char nr_aux_array[64];
2545 
2546 	asoc = &stcb->asoc;
2547 	at = 0;
2548 
2549 	old_cumack = asoc->cumulative_tsn;
2550 	old_base = asoc->mapping_array_base_tsn;
2551 	old_highest = asoc->highest_tsn_inside_map;
2552 	if (asoc->mapping_array_size < 64)
2553 		memcpy(aux_array, asoc->mapping_array,
2554 		    asoc->mapping_array_size);
2555 	else
2556 		memcpy(aux_array, asoc->mapping_array, 64);
2557 	/* EY do the same for nr_mapping_array */
2558 	if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2559 		if (asoc->nr_mapping_array_size != asoc->mapping_array_size) {
2560 			/* The sizes of the map and nr_map should be equal;
2561 			 * disabled EY debugging output reported a mismatch here. */
2562 		}
2563 		if (asoc->nr_mapping_array_base_tsn != asoc->mapping_array_base_tsn) {
2564 			/* The base TSNs of the map and nr_map should be equal;
2565 			 * disabled EY debugging output reported a mismatch here. */
2566 		}
2572 		/* EY! just imitating the above code */
2573 		if (asoc->nr_mapping_array_size < 64)
2574 			memcpy(nr_aux_array, asoc->nr_mapping_array,
2575 			    asoc->nr_mapping_array_size);
2576 		else
2577 			memcpy(nr_aux_array, asoc->nr_mapping_array, 64);
2578 	}
2579 	/*
2580 	 * We could probably improve this a small bit by calculating the
2581 	 * offset of the current cum-ack as the starting point.
2582 	 */
2583 	at = 0;
2584 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2585 		/*
2586 		 * We must combine the renegable and non-renegable arrays
2587 		 * here to form a unified view of what is acked right now
2588 		 * (since they are kept separate).
2589 		 */
2590 		comb_byte = asoc->mapping_array[slide_from] | asoc->nr_mapping_array[slide_from];
2591 		if (comb_byte == 0xff) {
2592 			at += 8;
2593 			last_all_ones = 1;
2594 		} else {
2595 			/* there is a 0 bit */
2596 			at += sctp_map_lookup_tab[comb_byte];
2597 			last_all_ones = 0;
2598 			break;
2599 		}
2600 	}
2601 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - last_all_ones);
2602 	/* at is one off, since in the table an embedded -1 is present */
2603 	at++;
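	/*
	 * Worked example (editorial): with mapping bytes { 0xff, 0x07, ... }
	 * the scan above adds 8 for byte 0 and tab[0x07] == 2 for byte 1, so
	 * at == 10 with last_all_ones == 0; the cum-ack becomes base + 10,
	 * i.e. TSNs base through base+10 are all present.
	 */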
2604 
2605 	if (compare_with_wrap(asoc->cumulative_tsn,
2606 	    asoc->highest_tsn_inside_map,
2607 	    MAX_TSN)) {
2608 #ifdef INVARIANTS
2609 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2610 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2611 #else
2612 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2613 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2614 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2615 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2616 		}
2617 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2618 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2619 #endif
2620 	}
2621 	if ((asoc->cumulative_tsn == asoc->highest_tsn_inside_map) && (at >= 8)) {
2622 		/* The complete array was completed by a single FR */
2623 		/* highest becomes the cum-ack */
2624 		int clr;
2625 
2626 		asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
2627 		/* clear the array */
2628 		clr = (at >> 3) + 1;
2629 		if (clr > asoc->mapping_array_size) {
2630 			clr = asoc->mapping_array_size;
2631 		}
2632 		memset(asoc->mapping_array, 0, clr);
2633 		/* base becomes one ahead of the cum-ack */
2634 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2635 
2636 		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2637 
2638 			if (clr > asoc->nr_mapping_array_size)
2639 				clr = asoc->nr_mapping_array_size;
2640 
2641 			memset(asoc->nr_mapping_array, 0, clr);
2642 			/* base becomes one ahead of the cum-ack */
2643 			asoc->nr_mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2644 			asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2645 		}
2646 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2647 			sctp_log_map(old_base, old_cumack, old_highest,
2648 			    SCTP_MAP_PREPARE_SLIDE);
2649 			sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2650 			    asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED);
2651 		}
2652 	} else if (at >= 8) {
2653 		/* we can slide the mapping array down */
2654 		/* slide_from holds where we hit the first NON 0xff byte */
2655 
2656 		/*
2657 		 * now calculate the ceiling of the move using our highest
2658 		 * TSN value
2659 		 */
2660 		if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
2661 			lgap = asoc->highest_tsn_inside_map -
2662 			    asoc->mapping_array_base_tsn;
2663 		} else {
2664 			lgap = (MAX_TSN - asoc->mapping_array_base_tsn) +
2665 			    asoc->highest_tsn_inside_map + 1;
2666 		}
2667 		slide_end = lgap >> 3;
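		/*
		 * Editorial note: lgap is the bit offset of the highest TSN
		 * inside the map, so slide_end (lgap >> 3) is the index of
		 * the byte holding it; bytes slide_from..slide_end still
		 * carry information and must be preserved by the slide.
		 */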
2668 		if (slide_end < slide_from) {
2669 #ifdef INVARIANTS
2670 			panic("impossible slide");
2671 #else
2672 			SCTP_PRINTF("impossible slide?\n");
2673 			return;
2674 #endif
2675 		}
2676 		if (slide_end > asoc->mapping_array_size) {
2677 #ifdef INVARIANTS
2678 			panic("would overrun buffer");
2679 #else
2680 			SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2681 			    asoc->mapping_array_size, slide_end);
2682 			slide_end = asoc->mapping_array_size;
2683 #endif
2684 		}
2685 		distance = (slide_end - slide_from) + 1;
2686 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2687 			sctp_log_map(old_base, old_cumack, old_highest,
2688 			    SCTP_MAP_PREPARE_SLIDE);
2689 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2690 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2691 		}
2692 		if (distance + slide_from > asoc->mapping_array_size ||
2693 		    distance < 0) {
2694 			/*
2695 			 * Here we do NOT slide the array forward, so that
2696 			 * hopefully when more data comes in to fill it up
2697 			 * we will be able to slide it forward. Really, I
2698 			 * don't think this should happen :-0
2699 			 */
2700 
2701 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2702 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2703 				    (uint32_t) asoc->mapping_array_size,
2704 				    SCTP_MAP_SLIDE_NONE);
2705 			}
2706 		} else {
2707 			int ii;
2708 
2709 			for (ii = 0; ii < distance; ii++) {
2710 				asoc->mapping_array[ii] =
2711 				    asoc->mapping_array[slide_from + ii];
2712 			}
2713 			for (ii = distance; ii <= slide_end; ii++) {
2714 				asoc->mapping_array[ii] = 0;
2715 			}
2716 			asoc->mapping_array_base_tsn += (slide_from << 3);
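			/*
			 * Editorial note: moving the array down by slide_from
			 * bytes discards slide_from * 8 TSNs of history, so
			 * the base TSN advances by exactly (slide_from << 3).
			 */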
2717 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2718 				sctp_log_map(asoc->mapping_array_base_tsn,
2719 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2720 				    SCTP_MAP_SLIDE_RESULT);
2721 			}
2722 			/*
2723 			 * EY if doing nr_sacks then slide the
2724 			 * nr_mapping_array accordingly please
2725 			 */
2726 			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2727 				for (ii = 0; ii < distance; ii++) {
2728 					asoc->nr_mapping_array[ii] =
2729 					    asoc->nr_mapping_array[slide_from + ii];
2730 				}
2731 				for (ii = distance; ii <= slide_end; ii++) {
2732 					asoc->nr_mapping_array[ii] = 0;
2733 				}
2734 				asoc->nr_mapping_array_base_tsn += (slide_from << 3);
2735 			}
2736 		}
2737 	}
2738 	/*
2739 	 * Now we need to see if we need to queue a sack or just start the
2740 	 * timer (if allowed).
2741 	 */
2742 	if (ok_to_sack) {
2743 		if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2744 			/*
2745 			 * Ok, special case: in the SHUTDOWN-SENT state we
2746 			 * make sure the SACK timer is off and instead send a
2747 			 * SHUTDOWN and a SACK.
2748 			 */
2749 			if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2750 				sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2751 				    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2752 			}
2753 			sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2754 			/*
2755 			 * EY if nr_sacks are in use then send an NR-SACK,
2756 			 * otherwise a SACK.
2757 			 */
2758 			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
2759 				sctp_send_nr_sack(stcb);
2760 			else
2761 				sctp_send_sack(stcb);
2762 		} else {
2763 			int is_a_gap;
2764 
2765 			/* is there a gap now? */
2766 			is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2767 			    stcb->asoc.cumulative_tsn, MAX_TSN);
2768 
2769 			/*
2770 			 * CMT DAC algorithm: increase number of packets
2771 			 * received since last ack
2772 			 */
2773 			stcb->asoc.cmt_dac_pkts_rcvd++;
2774 
2775 			if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2776 								 * SACK */
2777 			    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2778 								 * longer is one */
2779 			    (stcb->asoc.numduptsns) ||	/* we have dup's */
2780 			    (is_a_gap) ||	/* is still a gap */
2781 			    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2782 			    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2783 			    ) {
2784 
2785 				if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off)) &&
2786 				    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2787 				    (stcb->asoc.send_sack == 0) &&
2788 				    (stcb->asoc.numduptsns == 0) &&
2789 				    (stcb->asoc.delayed_ack) &&
2790 				    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2791 
2792 					/*
2793 					 * CMT DAC algorithm: with CMT,
2794 					 * delay acks even in the face of
2795 					 * reordering. Therefore, acks that
2796 					 * do not have to be sent because of
2797 					 * the above reasons will be
2798 					 * delayed. That is, acks that would
2799 					 * have been sent due to gap reports
2800 					 * will be delayed with DAC. Start
2801 					 * the delayed ack timer.
2803 					 */
2804 					sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2805 					    stcb->sctp_ep, stcb, NULL);
2806 				} else {
2807 					/*
2808 					 * Ok we must build a SACK since the
2809 					 * timer is pending, we got our
2810 					 * first packet OR there are gaps or
2811 					 * duplicates.
2812 					 */
2813 					(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2814 					/*
2815 					 * EY if nr_sacks are in use then send
2816 					 * an NR-SACK, otherwise a SACK.
2817 					 */
2818 					if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
2819 						sctp_send_nr_sack(stcb);
2820 					else
2821 						sctp_send_sack(stcb);
2822 				}
2823 			} else {
2824 				if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2825 					sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2826 					    stcb->sctp_ep, stcb, NULL);
2827 				}
2828 			}
2829 		}
2830 	}
2831 }
2832 
2833 void
2834 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2835 {
2836 	struct sctp_tmit_chunk *chk;
2837 	uint32_t tsize, pd_point;
2838 	uint16_t nxt_todel;
2839 
2840 	if (asoc->fragmented_delivery_inprogress) {
2841 		sctp_service_reassembly(stcb, asoc);
2842 	}
2843 	/* Can we proceed further, i.e. is the PD-API complete? */
2844 	if (asoc->fragmented_delivery_inprogress) {
2845 		/* no */
2846 		return;
2847 	}
2848 	/*
2849 	 * Now, is there some other chunk we can deliver from the reassembly
2850 	 * queue?
2851 	 */
2852 doit_again:
2853 	chk = TAILQ_FIRST(&asoc->reasmqueue);
2854 	if (chk == NULL) {
2855 		asoc->size_on_reasm_queue = 0;
2856 		asoc->cnt_on_reasm_queue = 0;
2857 		return;
2858 	}
2859 	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2860 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2861 	    ((nxt_todel == chk->rec.data.stream_seq) ||
2862 	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2863 		/*
2864 		 * Yep, the first one is here. We set up to start reception
2865 		 * by backing down the TSN just in case we can't deliver.
2866 		 */
2867 
2868 		/*
2869 		 * Before we start, though, either the entire message should
2870 		 * be here, or at least enough of it to reach the partial
2871 		 * delivery point must be queued.
2872 		 */
2873 		if (stcb->sctp_socket) {
2874 			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
2875 			    stcb->sctp_ep->partial_delivery_point);
2876 		} else {
2877 			pd_point = stcb->sctp_ep->partial_delivery_point;
2878 		}
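		/*
		 * Editorial example: with a 64 KB receive buffer and a
		 * configured partial_delivery_point of 4 KB, pd_point is
		 * 4 KB, so the PD-API below engages once either the whole
		 * message or at least 4 KB of it is on the reassembly queue.
		 */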
2879 		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2880 			asoc->fragmented_delivery_inprogress = 1;
2881 			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2882 			asoc->str_of_pdapi = chk->rec.data.stream_number;
2883 			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2884 			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2885 			asoc->fragment_flags = chk->rec.data.rcv_flags;
2886 			sctp_service_reassembly(stcb, asoc);
2887 			if (asoc->fragmented_delivery_inprogress == 0) {
2888 				goto doit_again;
2889 			}
2890 		}
2891 	}
2892 }
2893 
2894 int
2895 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2896     struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2897     struct sctp_nets *net, uint32_t * high_tsn)
2898 {
2899 	struct sctp_data_chunk *ch, chunk_buf;
2900 	struct sctp_association *asoc;
2901 	int num_chunks = 0;	/* number of data chunks processed */
2902 	int stop_proc = 0;
2903 	int chk_length, break_flag, last_chunk;
2904 	int abort_flag = 0, was_a_gap = 0;
2905 	struct mbuf *m;
2906 
2907 	/* set the rwnd */
2908 	sctp_set_rwnd(stcb, &stcb->asoc);
2909 
2910 	m = *mm;
2911 	SCTP_TCB_LOCK_ASSERT(stcb);
2912 	asoc = &stcb->asoc;
2913 	if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2914 	    stcb->asoc.cumulative_tsn, MAX_TSN)) {
2915 		/* there was a gap before this data was processed */
2916 		was_a_gap = 1;
2917 	}
2918 	/*
2919 	 * Record where we got the last DATA packet from for any SACK that
2920 	 * may need to go out. Don't bump the net; that is done ONLY when a
2921 	 * chunk is assigned.
2922 	 */
2923 	asoc->last_data_chunk_from = net;
2924 
2925 	/*-
2926 	 * Now before we proceed we must figure out if this is a wasted
2927 	 * cluster... i.e. it is a small packet sent in and yet the driver
2928 	 * underneath allocated a full cluster for it. If so we must copy it
2929 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2930 	 * with cluster starvation. Note for __Panda__ we don't do this
2931 	 * since it has clusters all the way down to 64 bytes.
2932 	 */
2933 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2934 		/* we only handle mbufs that are singletons.. not chains */
2935 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2936 		if (m) {
2937 			/* ok, let's see if we can copy the data up */
2938 			caddr_t *from, *to;
2939 
2940 			/* get the pointers and copy */
2941 			to = mtod(m, caddr_t *);
2942 			from = mtod((*mm), caddr_t *);
2943 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2944 			/* copy the length and free up the old */
2945 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2946 			sctp_m_freem(*mm);
2947 			/* success, copy it back */
2948 			*mm = m;
2949 		} else {
2950 			/* We are in trouble in the mbuf world .. yikes */
2951 			m = *mm;
2952 		}
2953 	}
2954 	/* get pointer to the first chunk header */
2955 	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2956 	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2957 	if (ch == NULL) {
2958 		return (1);
2959 	}
2960 	/*
2961 	 * process all DATA chunks...
2962 	 */
2963 	*high_tsn = asoc->cumulative_tsn;
2964 	break_flag = 0;
2965 	asoc->data_pkts_seen++;
2966 	while (stop_proc == 0) {
2967 		/* validate chunk length */
2968 		chk_length = ntohs(ch->ch.chunk_length);
2969 		if (length - *offset < chk_length) {
2970 			/* all done, mutilated chunk */
2971 			stop_proc = 1;
2972 			break;
2973 		}
2974 		if (ch->ch.chunk_type == SCTP_DATA) {
2975 			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2976 				/*
2977 				 * Need to send an abort since we had an
2978 				 * invalid data chunk.
2979 				 */
2980 				struct mbuf *op_err;
2981 
2982 				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2983 				    0, M_DONTWAIT, 1, MT_DATA);
2984 
2985 				if (op_err) {
2986 					struct sctp_paramhdr *ph;
2987 					uint32_t *ippp;
2988 
2989 					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2990 					    (2 * sizeof(uint32_t));
2991 					ph = mtod(op_err, struct sctp_paramhdr *);
2992 					ph->param_type =
2993 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2994 					ph->param_length = htons(SCTP_BUF_LEN(op_err));
2995 					ippp = (uint32_t *) (ph + 1);
2996 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2997 					ippp++;
2998 					*ippp = asoc->cumulative_tsn;
2999 
3000 				}
3001 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
3002 				sctp_abort_association(inp, stcb, m, iphlen, sh,
3003 				    op_err, 0, net->port);
3004 				return (2);
3005 			}
3006 #ifdef SCTP_AUDITING_ENABLED
3007 			sctp_audit_log(0xB1, 0);
3008 #endif
3009 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
3010 				last_chunk = 1;
3011 			} else {
3012 				last_chunk = 0;
3013 			}
3014 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
3015 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
3016 			    last_chunk)) {
3017 				num_chunks++;
3018 			}
3019 			if (abort_flag)
3020 				return (2);
3021 
3022 			if (break_flag) {
3023 				/*
3024 				 * Set because we are out of rwnd space and
3025 				 * have no drop-report space left.
3026 				 */
3027 				stop_proc = 1;
3028 				break;
3029 			}
3030 		} else {
3031 			/* not a data chunk in the data region */
3032 			switch (ch->ch.chunk_type) {
3033 			case SCTP_INITIATION:
3034 			case SCTP_INITIATION_ACK:
3035 			case SCTP_SELECTIVE_ACK:
3036 			case SCTP_NR_SELECTIVE_ACK:	/* EY */
3037 			case SCTP_HEARTBEAT_REQUEST:
3038 			case SCTP_HEARTBEAT_ACK:
3039 			case SCTP_ABORT_ASSOCIATION:
3040 			case SCTP_SHUTDOWN:
3041 			case SCTP_SHUTDOWN_ACK:
3042 			case SCTP_OPERATION_ERROR:
3043 			case SCTP_COOKIE_ECHO:
3044 			case SCTP_COOKIE_ACK:
3045 			case SCTP_ECN_ECHO:
3046 			case SCTP_ECN_CWR:
3047 			case SCTP_SHUTDOWN_COMPLETE:
3048 			case SCTP_AUTHENTICATION:
3049 			case SCTP_ASCONF_ACK:
3050 			case SCTP_PACKET_DROPPED:
3051 			case SCTP_STREAM_RESET:
3052 			case SCTP_FORWARD_CUM_TSN:
3053 			case SCTP_ASCONF:
3054 				/*
3055 				 * Now, what do we do with KNOWN chunks that
3056 				 * are NOT in the right place?
3057 				 *
3058 				 * For now, I do nothing but ignore them. We
3059 				 * may later want to add sysctl stuff to
3060 				 * switch out and do either an ABORT() or
3061 				 * possibly process them.
3062 				 */
3063 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
3064 					struct mbuf *op_err;
3065 
3066 					op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
3067 					sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
3068 					return (2);
3069 				}
3070 				break;
3071 			default:
3072 				/* unknown chunk type, use bit rules */
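				/*
				 * Editorial note: in the chunk-type bit
				 * rules, bit 0x40 means "report the
				 * unrecognized chunk in an ERROR" and bit
				 * 0x80 means "skip it and keep processing";
				 * with 0x80 clear we stop handling the rest
				 * of the packet.
				 */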
3073 				if (ch->ch.chunk_type & 0x40) {
3074 					/* Add an error report to the queue */
3075 					struct mbuf *merr;
3076 					struct sctp_paramhdr *phd;
3077 
3078 					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
3079 					if (merr) {
3080 						phd = mtod(merr, struct sctp_paramhdr *);
3081 						/*
3082 						 * We cheat and use param
3083 						 * type since we did not
3084 						 * bother to define an error
3085 						 * cause struct. They are
3086 						 * the same basic format
3087 						 * with different names.
3088 						 */
3089 						phd->param_type =
3090 						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
3091 						phd->param_length =
3092 						    htons(chk_length + sizeof(*phd));
3093 						SCTP_BUF_LEN(merr) = sizeof(*phd);
3094 						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
3095 						    SCTP_SIZE32(chk_length),
3096 						    M_DONTWAIT);
3097 						if (SCTP_BUF_NEXT(merr)) {
3098 							sctp_queue_op_err(stcb, merr);
3099 						} else {
3100 							sctp_m_freem(merr);
3101 						}
3102 					}
3103 				}
3104 				if ((ch->ch.chunk_type & 0x80) == 0) {
3105 					/* discard the rest of this packet */
3106 					stop_proc = 1;
3107 				}	/* else skip this bad chunk and
3108 					 * continue... */
3109 				break;
3110 			}	/* switch on chunk type */
3111 		}
3112 		*offset += SCTP_SIZE32(chk_length);
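		/*
		 * Editorial note: SCTP_SIZE32() rounds the chunk length up
		 * to a multiple of 4, e.g. a 21-byte chunk advances the
		 * offset by 24, skipping the padding required between
		 * chunks.
		 */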
3113 		if ((*offset >= length) || stop_proc) {
3114 			/* no more data left in the mbuf chain */
3115 			stop_proc = 1;
3116 			continue;
3117 		}
3118 		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
3119 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
3120 		if (ch == NULL) {
3121 			*offset = length;
3122 			stop_proc = 1;
3123 			break;
3124 
3125 		}
3126 	}			/* while */
3127 	if (break_flag) {
3128 		/*
3129 		 * we need to report rwnd overrun drops.
3130 		 */
3131 		sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
3132 	}
3133 	if (num_chunks) {
3134 		/*
		 * Did we get data? If so, update the time for auto-close and
		 * give the peer credit for being alive.
3137 		 */
3138 		SCTP_STAT_INCR(sctps_recvpktwithdata);
3139 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3140 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3141 			    stcb->asoc.overall_error_count,
3142 			    0,
3143 			    SCTP_FROM_SCTP_INDATA,
3144 			    __LINE__);
3145 		}
3146 		stcb->asoc.overall_error_count = 0;
3147 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
3148 	}
3149 	/* now service all of the reassm queue if needed */
3150 	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
3151 		sctp_service_queues(stcb, asoc);
3152 
3153 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
3154 		/* Assure that we ack right away */
3155 		stcb->asoc.send_sack = 1;
3156 	}
3157 	/* Start a sack timer or QUEUE a SACK for sending */
3158 	if ((stcb->asoc.cumulative_tsn == stcb->asoc.highest_tsn_inside_map) &&
3159 	    (stcb->asoc.mapping_array[0] != 0xff)) {
3160 		if ((stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) ||
3161 		    (stcb->asoc.delayed_ack == 0) ||
3162 		    (stcb->asoc.numduptsns) ||
3163 		    (stcb->asoc.send_sack == 1)) {
3164 			if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
3165 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
3166 			}
3167 			/*
			 * EY: if NR-SACKs are in use, send an NR-SACK;
			 * otherwise send a SACK.
3170 			 */
3171 			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
3172 				sctp_send_nr_sack(stcb);
3173 			else
3174 				sctp_send_sack(stcb);
3175 		} else {
3176 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
3177 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
3178 				    stcb->sctp_ep, stcb, NULL);
3179 			}
3180 		}
3181 	} else {
3182 		sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
3183 	}
3184 	if (abort_flag)
3185 		return (2);
3186 
3187 	return (0);
3188 }
3189 
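/*
 * Much of the SACK machinery in this file orders 32-bit TSNs with
 * compare_with_wrap(), which must cope with sequence numbers that wrap
 * at MAX_TSN. The block below is a minimal userland sketch of that
 * serial-number ordering (RFC 1982 style); it is illustrative only and
 * not the kernel's exact implementation.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

/* Serial-number "a is after b", with wrap at 2^32. */
static int
tsn_gt(uint32_t a, uint32_t b)
{
	/* a is after b when the forward distance from b to a is < 2^31 */
	return ((a != b) && ((uint32_t)(a - b) < 0x80000000U));
}

int
main(void)
{
	/* TSN 5 is after 0xfffffffe across the wrap, not before it. */
	printf("%d %d\n", tsn_gt(5, 0xfffffffeU), tsn_gt(0xfffffffeU, 5));
	return (0);
}
#endif
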
3190 static int
3191 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
3192     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
3193     int *num_frs,
3194     uint32_t * biggest_newly_acked_tsn,
3195     uint32_t * this_sack_lowest_newack,
3196     int *ecn_seg_sums)
3197 {
3198 	struct sctp_tmit_chunk *tp1;
3199 	unsigned int theTSN;
3200 	int j, wake_him = 0;
3201 
3202 	/* Recover the tp1 we last saw */
3203 	tp1 = *p_tp1;
3204 	if (tp1 == NULL) {
3205 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3206 	}
3207 	for (j = frag_strt; j <= frag_end; j++) {
3208 		theTSN = j + last_tsn;
3209 		while (tp1) {
3210 			if (tp1->rec.data.doing_fast_retransmit)
3211 				(*num_frs) += 1;
3212 
3213 			/*-
3214 			 * CMT: CUCv2 algorithm. For each TSN being
3215 			 * processed from the sent queue, track the
3216 			 * next expected pseudo-cumack, or
3217 			 * rtx_pseudo_cumack, if required. Separate
3218 			 * cumack trackers for first transmissions,
3219 			 * and retransmissions.
3220 			 */
3221 			if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
3222 			    (tp1->snd_count == 1)) {
3223 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
3224 				tp1->whoTo->find_pseudo_cumack = 0;
3225 			}
3226 			if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
3227 			    (tp1->snd_count > 1)) {
3228 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
3229 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
3230 			}
3231 			if (tp1->rec.data.TSN_seq == theTSN) {
3232 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3233 					/*-
3234 					 * must be held until
3235 					 * cum-ack passes
3236 					 */
3237 					/*-
3238 					 * ECN Nonce: Add the nonce
3239 					 * value to the sender's
3240 					 * nonce sum
3241 					 */
3242 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3243 						/*-
3244 						 * If it is less than RESEND, it is
3245 						 * now no-longer in flight.
3246 						 * Higher values may already be set
3247 						 * via previous Gap Ack Blocks...
3248 						 * i.e. ACKED or RESEND.
3249 						 */
3250 						if (compare_with_wrap(tp1->rec.data.TSN_seq,
3251 						    *biggest_newly_acked_tsn, MAX_TSN)) {
3252 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
3253 						}
3254 						/*-
3255 						 * CMT: SFR algo (and HTNA) - set
3256 						 * saw_newack to 1 for dest being
3257 						 * newly acked. update
3258 						 * this_sack_highest_newack if
3259 						 * appropriate.
3260 						 */
3261 						if (tp1->rec.data.chunk_was_revoked == 0)
3262 							tp1->whoTo->saw_newack = 1;
3263 
3264 						if (compare_with_wrap(tp1->rec.data.TSN_seq,
3265 						    tp1->whoTo->this_sack_highest_newack,
3266 						    MAX_TSN)) {
3267 							tp1->whoTo->this_sack_highest_newack =
3268 							    tp1->rec.data.TSN_seq;
3269 						}
3270 						/*-
3271 						 * CMT DAC algo: also update
3272 						 * this_sack_lowest_newack
3273 						 */
3274 						if (*this_sack_lowest_newack == 0) {
3275 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3276 								sctp_log_sack(*this_sack_lowest_newack,
3277 								    last_tsn,
3278 								    tp1->rec.data.TSN_seq,
3279 								    0,
3280 								    0,
3281 								    SCTP_LOG_TSN_ACKED);
3282 							}
3283 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
3284 						}
3285 						/*-
3286 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3287 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3288 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3289 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3290 						 * Separate pseudo_cumack trackers for first transmissions and
3291 						 * retransmissions.
3292 						 */
3293 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
3294 							if (tp1->rec.data.chunk_was_revoked == 0) {
3295 								tp1->whoTo->new_pseudo_cumack = 1;
3296 							}
3297 							tp1->whoTo->find_pseudo_cumack = 1;
3298 						}
3299 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3300 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3301 						}
3302 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
3303 							if (tp1->rec.data.chunk_was_revoked == 0) {
3304 								tp1->whoTo->new_pseudo_cumack = 1;
3305 							}
3306 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
3307 						}
3308 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3309 							sctp_log_sack(*biggest_newly_acked_tsn,
3310 							    last_tsn,
3311 							    tp1->rec.data.TSN_seq,
3312 							    frag_strt,
3313 							    frag_end,
3314 							    SCTP_LOG_TSN_ACKED);
3315 						}
3316 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3317 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3318 							    tp1->whoTo->flight_size,
3319 							    tp1->book_size,
3320 							    (uintptr_t) tp1->whoTo,
3321 							    tp1->rec.data.TSN_seq);
3322 						}
3323 						sctp_flight_size_decrease(tp1);
3324 						sctp_total_flight_decrease(stcb, tp1);
3325 
3326 						tp1->whoTo->net_ack += tp1->send_size;
3327 						if (tp1->snd_count < 2) {
3328 							/*-
							 * True non-retransmitted chunk
3330 							 */
3331 							tp1->whoTo->net_ack2 += tp1->send_size;
3332 
3333 							/*-
							 * update RTO too?
3335 							 */
3336 							if (tp1->do_rtt) {
3337 								tp1->whoTo->RTO =
3338 								    sctp_calculate_rto(stcb,
3339 								    &stcb->asoc,
3340 								    tp1->whoTo,
3341 								    &tp1->sent_rcv_time,
3342 								    sctp_align_safe_nocopy);
3343 								tp1->do_rtt = 0;
3344 							}
3345 						}
3346 					}
3347 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3348 						(*ecn_seg_sums) += tp1->rec.data.ect_nonce;
3349 						(*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
3350 						if (compare_with_wrap(tp1->rec.data.TSN_seq,
3351 						    stcb->asoc.this_sack_highest_gap,
3352 						    MAX_TSN)) {
3353 							stcb->asoc.this_sack_highest_gap =
3354 							    tp1->rec.data.TSN_seq;
3355 						}
3356 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3357 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3358 #ifdef SCTP_AUDITING_ENABLED
3359 							sctp_audit_log(0xB2,
3360 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3361 #endif
3362 						}
3363 					}
3364 					/*-
3365 					 * All chunks NOT UNSENT fall through here and are marked
3366 					 * (leave PR-SCTP ones that are to skip alone though)
3367 					 */
3368 					if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
3369 						tp1->sent = SCTP_DATAGRAM_MARKED;
3370 
3371 					if (tp1->rec.data.chunk_was_revoked) {
3372 						/* deflate the cwnd */
3373 						tp1->whoTo->cwnd -= tp1->book_size;
3374 						tp1->rec.data.chunk_was_revoked = 0;
3375 					}
3376 					/* NR Sack code here */
3377 					if (nr_sacking) {
3378 						if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
3379 							tp1->sent = SCTP_DATAGRAM_NR_MARKED;
						/*
						 * TAILQ_REMOVE(&asoc->sent_queue,
						 * tp1, sctp_next);
						 */
3384 						if (tp1->data) {
							/* sa_ignore NO_NULL_CHK */
3389 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3390 							sctp_m_freem(tp1->data);
3391 						}
3392 						tp1->data = NULL;
3393 						/* asoc->sent_queue_cnt--; */
						/* sctp_free_a_chunk(stcb, tp1); */
3398 						wake_him++;
3399 					}
3400 				}
3401 				break;
3402 			}	/* if (tp1->TSN_seq == theTSN) */
3403 			if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
3404 			    MAX_TSN))
3405 				break;
3406 
3407 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3408 		}		/* end while (tp1) */
3409 		/* In case the fragments were not in order we must reset */
3410 		if (tp1 == NULL) {
3411 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3412 		}
3413 	}			/* end for (j = fragStart */
3414 	*p_tp1 = tp1;
3415 	return (wake_him);	/* Return value only used for nr-sack */
3416 }
3417 
3418 
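/*
 * sctp_process_segment_range() above walks one SACK gap-ack block: for
 * each offset j in [frag_strt, frag_end] it handles the TSN
 * last_tsn + j. Below is a small userland sketch of that expansion
 * with made-up numbers; illustrative only.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t last_tsn = 1000;		/* cum-ack from the SACK */
	uint16_t frag_strt = 2, frag_end = 4;	/* one gap-ack block */
	uint16_t j;

	/* Acks TSNs 1002..1004; TSN 1001 is still reported missing. */
	for (j = frag_strt; j <= frag_end; j++)
		printf("gap-acked TSN %u\n", last_tsn + j);
	return (0);
}
#endif
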
3419 static void
3420 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3421     struct sctp_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3422     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3423     int num_seg, int *ecn_seg_sums)
3424 {
3425 	/************************************************/
3426 	/* process fragments and update sendqueue        */
3427 	/************************************************/
3428 	struct sctp_sack *sack;
3429 	struct sctp_gap_ack_block *frag, block;
3430 	struct sctp_tmit_chunk *tp1;
3431 	int i;
3432 	int num_frs = 0;
3433 
3434 	uint16_t frag_strt, frag_end, primary_flag_set;
3435 	u_long last_frag_high;
3436 
3437 	/*
3438 	 * @@@ JRI : TODO: This flag is not used anywhere .. remove?
3439 	 */
3440 	if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3441 		primary_flag_set = 1;
3442 	} else {
3443 		primary_flag_set = 0;
3444 	}
3445 	sack = &ch->sack;
3446 
3447 	frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3448 	    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3449 	*offset += sizeof(block);
3450 	if (frag == NULL) {
3451 		return;
3452 	}
3453 	tp1 = NULL;
3454 	last_frag_high = 0;
3455 	for (i = 0; i < num_seg; i++) {
3456 		frag_strt = ntohs(frag->start);
3457 		frag_end = ntohs(frag->end);
3458 		/* some sanity checks on the fragment offsets */
3459 		if (frag_strt > frag_end) {
3460 			/* this one is malformed, skip */
3461 			frag++;
3462 			continue;
3463 		}
3464 		if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
3465 		    MAX_TSN))
3466 			*biggest_tsn_acked = frag_end + last_tsn;
3467 
		/* mark acked dgs and find out the highest TSN being acked */
3469 		if (tp1 == NULL) {
3470 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3471 
3472 			/* save the locations of the last frags */
3473 			last_frag_high = frag_end + last_tsn;
3474 		} else {
3475 			/*
			 * now let's see if we need to reset the queue due to
			 * an out-of-order SACK fragment
3478 			 */
3479 			if (compare_with_wrap(frag_strt + last_tsn,
3480 			    last_frag_high, MAX_TSN)) {
3481 				/*
3482 				 * if the new frag starts after the last TSN
3483 				 * frag covered, we are ok and this one is
3484 				 * beyond the last one
3485 				 */
3486 				;
3487 			} else {
3488 				/*
3489 				 * ok, they have reset us, so we need to
				 * reset the queue. This will cause extra
				 * hunting, but hey, they chose the
				 * performance hit when they failed to order
				 * their gaps.
3494 				 */
3495 				tp1 = TAILQ_FIRST(&asoc->sent_queue);
3496 			}
3497 			last_frag_high = frag_end + last_tsn;
3498 		}
3499 		sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3500 		    0, &num_frs, biggest_newly_acked_tsn,
3501 		    this_sack_lowest_newack, ecn_seg_sums);
3502 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3503 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3504 		*offset += sizeof(block);
3505 		if (frag == NULL) {
3506 			break;
3507 		}
3508 	}
3509 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3510 		if (num_frs)
3511 			sctp_log_fr(*biggest_tsn_acked,
3512 			    *biggest_newly_acked_tsn,
3513 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3514 	}
3515 }
3516 
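/*
 * sctp_handle_segments() above restarts its sent-queue scan whenever a
 * gap-ack block starts at or below the highest TSN covered so far
 * (peers are expected to send blocks in ascending order). A small
 * userland sketch of that check with made-up numbers; illustrative
 * only.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t last_tsn = 1000, last_frag_high = 0;
	uint16_t starts[] = { 2, 8, 5 };	/* third block regresses */
	uint16_t ends[] = { 4, 9, 6 };
	int i;

	for (i = 0; i < 3; i++) {
		if (last_frag_high != 0 &&
		    last_tsn + starts[i] <= last_frag_high)
			printf("block %d out of order: rescan queue\n", i);
		last_frag_high = last_tsn + ends[i];
	}
	return (0);
}
#endif
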
3517 static void
3518 sctp_check_for_revoked(struct sctp_tcb *stcb,
3519     struct sctp_association *asoc, uint32_t cumack,
3520     u_long biggest_tsn_acked)
3521 {
3522 	struct sctp_tmit_chunk *tp1;
3523 	int tot_revoked = 0;
3524 
3525 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3526 	while (tp1) {
3527 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
3528 		    MAX_TSN)) {
3529 			/*
			 * ok, this guy is either ACKED or MARKED. If it is
			 * ACKED it has been previously acked but not this
			 * time, i.e. revoked. If it is MARKED it was ACK'ed
			 * again.
3534 			 */
3535 			if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3536 			    MAX_TSN))
				break;

3540 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3541 				/* it has been revoked */
3542 				tp1->sent = SCTP_DATAGRAM_SENT;
3543 				tp1->rec.data.chunk_was_revoked = 1;
3544 				/*
3545 				 * We must add this stuff back in to assure
3546 				 * timers and such get started.
3547 				 */
3548 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3549 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3550 					    tp1->whoTo->flight_size,
3551 					    tp1->book_size,
3552 					    (uintptr_t) tp1->whoTo,
3553 					    tp1->rec.data.TSN_seq);
3554 				}
3555 				sctp_flight_size_increase(tp1);
3556 				sctp_total_flight_increase(stcb, tp1);
3557 				/*
3558 				 * We inflate the cwnd to compensate for our
3559 				 * artificial inflation of the flight_size.
3560 				 */
3561 				tp1->whoTo->cwnd += tp1->book_size;
3562 				tot_revoked++;
3563 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3564 					sctp_log_sack(asoc->last_acked_seq,
3565 					    cumack,
3566 					    tp1->rec.data.TSN_seq,
3567 					    0,
3568 					    0,
3569 					    SCTP_LOG_TSN_REVOKED);
3570 				}
3571 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3572 				/* it has been re-acked in this SACK */
3573 				tp1->sent = SCTP_DATAGRAM_ACKED;
3574 			}
3575 		}
3576 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3577 			break;
3578 		tp1 = TAILQ_NEXT(tp1, sctp_next);
3579 	}
3580 	if (tot_revoked > 0) {
3581 		/*
3582 		 * Setup the ecn nonce re-sync point. We do this since once
3583 		 * data is revoked we begin to retransmit things, which do
3584 		 * NOT have the ECN bits set. This means we are now out of
3585 		 * sync and must wait until we get back in sync with the
3586 		 * peer to check ECN bits.
3587 		 */
3588 		tp1 = TAILQ_FIRST(&asoc->send_queue);
3589 		if (tp1 == NULL) {
3590 			asoc->nonce_resync_tsn = asoc->sending_seq;
3591 		} else {
3592 			asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
3593 		}
3594 		asoc->nonce_wait_for_ecne = 0;
3595 		asoc->nonce_sum_check = 0;
3596 	}
3597 }
3598 
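/*
 * Revocation in a nutshell: a TSN that an earlier SACK gap-acked
 * (DATAGRAM_ACKED) but that this SACK no longer covers must be treated
 * as in flight again, and the cwnd is inflated to match the artificial
 * flight-size increase sctp_check_for_revoked() above performs. Below
 * is a loose userland model of the state transition; illustrative
 * only, with stand-in state names.
 */
#if 0
#include <stdio.h>

enum chk_state { DG_SENT, DG_ACKED, DG_MARKED };

static enum chk_state
revoke_check(enum chk_state cur, int covered_by_this_sack)
{
	if (cur == DG_ACKED && !covered_by_this_sack)
		return (DG_SENT);	/* revoked: back in flight */
	if (cur == DG_MARKED && covered_by_this_sack)
		return (DG_ACKED);	/* re-acked in this SACK */
	return (cur);
}

int
main(void)
{
	enum chk_state s = DG_ACKED;	/* gap-acked by SACK #1 */

	s = revoke_check(s, 0);		/* SACK #2 omits the TSN */
	printf("%s\n", (s == DG_SENT) ? "revoked -> SENT" : "still ACKED");
	return (0);
}
#endif
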
3599 
3600 static void
3601 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3602     u_long biggest_tsn_acked, u_long biggest_tsn_newly_acked, u_long this_sack_lowest_newack, int accum_moved)
3603 {
3604 	struct sctp_tmit_chunk *tp1;
3605 	int strike_flag = 0;
3606 	struct timeval now;
3607 	int tot_retrans = 0;
3608 	uint32_t sending_seq;
3609 	struct sctp_nets *net;
3610 	int num_dests_sacked = 0;
3611 
3612 	/*
	 * Select the sending_seq: this is either the next thing ready to be
	 * sent but not yet transmitted, OR the next seq we will assign.
3615 	 */
3616 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3617 	if (tp1 == NULL) {
3618 		sending_seq = asoc->sending_seq;
3619 	} else {
3620 		sending_seq = tp1->rec.data.TSN_seq;
3621 	}
3622 
3623 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3624 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3625 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3626 			if (net->saw_newack)
3627 				num_dests_sacked++;
3628 		}
3629 	}
3630 	if (stcb->asoc.peer_supports_prsctp) {
3631 		(void)SCTP_GETTIME_TIMEVAL(&now);
3632 	}
3633 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3634 	while (tp1) {
3635 		strike_flag = 0;
3636 		if (tp1->no_fr_allowed) {
3637 			/* this one had a timeout or something */
3638 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3639 			continue;
3640 		}
3641 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3642 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3643 				sctp_log_fr(biggest_tsn_newly_acked,
3644 				    tp1->rec.data.TSN_seq,
3645 				    tp1->sent,
3646 				    SCTP_FR_LOG_CHECK_STRIKE);
3647 		}
3648 		if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3649 		    MAX_TSN) ||
3650 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3651 			/* done */
3652 			break;
3653 		}
3654 		if (stcb->asoc.peer_supports_prsctp) {
3655 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3656 				/* Is it expired? */
				/*
				 * TODO: sctp_constants.h needs alternative
				 * time macros when _KERNEL is undefined.
				 */
				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3664 					/* Yes so drop it */
3665 					if (tp1->data != NULL) {
3666 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3667 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3668 						    SCTP_SO_NOT_LOCKED);
3669 					}
3670 					tp1 = TAILQ_NEXT(tp1, sctp_next);
3671 					continue;
3672 				}
3673 			}
3674 		}
3675 		if (compare_with_wrap(tp1->rec.data.TSN_seq,
3676 		    asoc->this_sack_highest_gap, MAX_TSN)) {
3677 			/* we are beyond the tsn in the sack  */
3678 			break;
3679 		}
3680 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3681 			/* either a RESEND, ACKED, or MARKED */
3682 			/* skip */
3683 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3684 			continue;
3685 		}
3686 		/*
3687 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3688 		 */
3689 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3690 			/*
			 * No new acks were received for data sent to this
3692 			 * dest. Therefore, according to the SFR algo for
3693 			 * CMT, no data sent to this dest can be marked for
3694 			 * FR using this SACK.
3695 			 */
3696 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3697 			continue;
3698 		} else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq,
3699 		    tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
3700 			/*
			 * CMT: New acks were received for data sent to
3702 			 * this dest. But no new acks were seen for data
3703 			 * sent after tp1. Therefore, according to the SFR
3704 			 * algo for CMT, tp1 cannot be marked for FR using
3705 			 * this SACK. This step covers part of the DAC algo
3706 			 * and the HTNA algo as well.
3707 			 */
3708 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3709 			continue;
3710 		}
3711 		/*
		 * Here we check to see if we have already done a FR
3713 		 * and if so we see if the biggest TSN we saw in the sack is
3714 		 * smaller than the recovery point. If so we don't strike
3715 		 * the tsn... otherwise we CAN strike the TSN.
3716 		 */
3717 		/*
3718 		 * @@@ JRI: Check for CMT if (accum_moved &&
3719 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3720 		 * 0)) {
3721 		 */
3722 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3723 			/*
3724 			 * Strike the TSN if in fast-recovery and cum-ack
3725 			 * moved.
3726 			 */
3727 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3728 				sctp_log_fr(biggest_tsn_newly_acked,
3729 				    tp1->rec.data.TSN_seq,
3730 				    tp1->sent,
3731 				    SCTP_FR_LOG_STRIKE_CHUNK);
3732 			}
3733 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3734 				tp1->sent++;
3735 			}
3736 			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3737 				/*
				 * CMT DAC algorithm: If the SACK flag is set
				 * to 0, then the lowest_newack test will not
				 * pass because it would have been set to the
				 * cumack earlier. If the chunk is not already
				 * marked for retransmission, the SACK is not
				 * a mixed SACK, and tp1 is not between two
				 * sacked TSNs, then mark it one more time.
				 * NOTE that we mark one additional time since
				 * the SACK DAC flag indicates that two packets
				 * have been received after this missing TSN.
3748 				 */
3749 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3750 				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3751 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3752 						sctp_log_fr(16 + num_dests_sacked,
3753 						    tp1->rec.data.TSN_seq,
3754 						    tp1->sent,
3755 						    SCTP_FR_LOG_STRIKE_CHUNK);
3756 					}
3757 					tp1->sent++;
3758 				}
3759 			}
3760 		} else if ((tp1->rec.data.doing_fast_retransmit) && (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
3761 			/*
3762 			 * For those that have done a FR we must take
3763 			 * special consideration if we strike. I.e the
3764 			 * biggest_newly_acked must be higher than the
3765 			 * sending_seq at the time we did the FR.
3766 			 */
3767 			if (
3768 #ifdef SCTP_FR_TO_ALTERNATE
3769 			/*
3770 			 * If FR's go to new networks, then we must only do
3771 			 * this for singly homed asoc's. However if the FR's
			 * go to the same network (Armando's work) then it's
			 * ok to FR multiple times.
3774 			 */
3775 			    (asoc->numnets < 2)
3776 #else
3777 			    (1)
3778 #endif
3779 			    ) {
3780 
3781 				if ((compare_with_wrap(biggest_tsn_newly_acked,
3782 				    tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3783 				    (biggest_tsn_newly_acked ==
3784 				    tp1->rec.data.fast_retran_tsn)) {
3785 					/*
3786 					 * Strike the TSN, since this ack is
3787 					 * beyond where things were when we
3788 					 * did a FR.
3789 					 */
3790 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3791 						sctp_log_fr(biggest_tsn_newly_acked,
3792 						    tp1->rec.data.TSN_seq,
3793 						    tp1->sent,
3794 						    SCTP_FR_LOG_STRIKE_CHUNK);
3795 					}
3796 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3797 						tp1->sent++;
3798 					}
3799 					strike_flag = 1;
3800 					if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3801 						/*
						 * CMT DAC algorithm: If the
						 * SACK flag is set to 0,
						 * then the lowest_newack
						 * test will not pass because
						 * it would have been set to
						 * the cumack earlier. If the
						 * chunk is not already marked
						 * for retransmission, the
						 * SACK is not a mixed SACK,
						 * and tp1 is not between two
						 * sacked TSNs, then mark it
						 * one more time. NOTE that we
						 * mark one additional time
						 * since the SACK DAC flag
						 * indicates that two packets
						 * have been received after
						 * this missing TSN.
3819 						 */
3820 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3821 						    (num_dests_sacked == 1) &&
3822 						    compare_with_wrap(this_sack_lowest_newack,
3823 						    tp1->rec.data.TSN_seq, MAX_TSN)) {
3824 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3825 								sctp_log_fr(32 + num_dests_sacked,
3826 								    tp1->rec.data.TSN_seq,
3827 								    tp1->sent,
3828 								    SCTP_FR_LOG_STRIKE_CHUNK);
3829 							}
3830 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3831 								tp1->sent++;
3832 							}
3833 						}
3834 					}
3835 				}
3836 			}
3837 			/*
3838 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3839 			 * algo covers HTNA.
3840 			 */
3841 		} else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3842 		    biggest_tsn_newly_acked, MAX_TSN)) {
3843 			/*
			 * We don't strike these: this is the HTNA
			 * algorithm, i.e. we don't strike if our TSN is
3846 			 * larger than the Highest TSN Newly Acked.
3847 			 */
3848 			;
3849 		} else {
3850 			/* Strike the TSN */
3851 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3852 				sctp_log_fr(biggest_tsn_newly_acked,
3853 				    tp1->rec.data.TSN_seq,
3854 				    tp1->sent,
3855 				    SCTP_FR_LOG_STRIKE_CHUNK);
3856 			}
3857 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3858 				tp1->sent++;
3859 			}
3860 			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3861 				/*
				 * CMT DAC algorithm: If the SACK flag is set
				 * to 0, then the lowest_newack test will not
				 * pass because it would have been set to the
				 * cumack earlier. If the chunk is not already
				 * marked for retransmission, the SACK is not
				 * a mixed SACK, and tp1 is not between two
				 * sacked TSNs, then mark it one more time.
				 * NOTE that we mark one additional time since
				 * the SACK DAC flag indicates that two packets
				 * have been received after this missing TSN.
3872 				 */
3873 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3874 				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3875 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3876 						sctp_log_fr(48 + num_dests_sacked,
3877 						    tp1->rec.data.TSN_seq,
3878 						    tp1->sent,
3879 						    SCTP_FR_LOG_STRIKE_CHUNK);
3880 					}
3881 					tp1->sent++;
3882 				}
3883 			}
3884 		}
3885 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3886 			struct sctp_nets *alt;
3887 
3888 			/* fix counts and things */
3889 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3890 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3891 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3892 				    tp1->book_size,
3893 				    (uintptr_t) tp1->whoTo,
3894 				    tp1->rec.data.TSN_seq);
3895 			}
3896 			if (tp1->whoTo) {
3897 				tp1->whoTo->net_ack++;
3898 				sctp_flight_size_decrease(tp1);
3899 			}
3900 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3901 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3902 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3903 			}
3904 			/* add back to the rwnd */
3905 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3906 
3907 			/* remove from the total flight */
3908 			sctp_total_flight_decrease(stcb, tp1);
3909 
3910 			if ((stcb->asoc.peer_supports_prsctp) &&
3911 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3912 				/*
3913 				 * Has it been retransmitted tv_sec times? -
3914 				 * we store the retran count there.
3915 				 */
3916 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3917 					/* Yes, so drop it */
3918 					if (tp1->data != NULL) {
3919 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3920 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3921 						    SCTP_SO_NOT_LOCKED);
3922 					}
3923 					/* Make sure to flag we had a FR */
3924 					tp1->whoTo->net_ack++;
3925 					tp1 = TAILQ_NEXT(tp1, sctp_next);
3926 					continue;
3927 				}
3928 			}
3929 			/* printf("OK, we are now ready to FR this guy\n"); */
3930 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3931 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3932 				    0, SCTP_FR_MARKED);
3933 			}
3934 			if (strike_flag) {
3935 				/* This is a subsequent FR */
3936 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3937 			}
3938 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3939 			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
3940 				/*
3941 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3942 				 * If CMT is being used, then pick dest with
3943 				 * largest ssthresh for any retransmission.
3944 				 */
3945 				tp1->no_fr_allowed = 1;
3946 				alt = tp1->whoTo;
3947 				/* sa_ignore NO_NULL_CHK */
3948 				if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
3949 					/*
3950 					 * JRS 5/18/07 - If CMT PF is on,
3951 					 * use the PF version of
3952 					 * find_alt_net()
3953 					 */
3954 					alt = sctp_find_alternate_net(stcb, alt, 2);
3955 				} else {
3956 					/*
3957 					 * JRS 5/18/07 - If only CMT is on,
3958 					 * use the CMT version of
3959 					 * find_alt_net()
3960 					 */
3961 					/* sa_ignore NO_NULL_CHK */
3962 					alt = sctp_find_alternate_net(stcb, alt, 1);
3963 				}
3964 				if (alt == NULL) {
3965 					alt = tp1->whoTo;
3966 				}
3967 				/*
3968 				 * CUCv2: If a different dest is picked for
3969 				 * the retransmission, then new
3970 				 * (rtx-)pseudo_cumack needs to be tracked
3971 				 * for orig dest. Let CUCv2 track new (rtx-)
3972 				 * pseudo-cumack always.
3973 				 */
3974 				if (tp1->whoTo) {
3975 					tp1->whoTo->find_pseudo_cumack = 1;
3976 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3977 				}
3978 			} else {/* CMT is OFF */
3979 
3980 #ifdef SCTP_FR_TO_ALTERNATE
3981 				/* Can we find an alternate? */
3982 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3983 #else
3984 				/*
3985 				 * default behavior is to NOT retransmit
3986 				 * FR's to an alternate. Armando Caro's
3987 				 * paper details why.
3988 				 */
3989 				alt = tp1->whoTo;
3990 #endif
3991 			}
3992 
3993 			tp1->rec.data.doing_fast_retransmit = 1;
3994 			tot_retrans++;
3995 			/* mark the sending seq for possible subsequent FR's */
3996 			/*
3997 			 * printf("Marking TSN for FR new value %x\n",
			 * (uint32_t)tp1->rec.data.TSN_seq);
3999 			 */
4000 			if (TAILQ_EMPTY(&asoc->send_queue)) {
4001 				/*
				 * If the send queue is empty then sending_seq
				 * is the next sequence number that will be
				 * assigned, so we use it to mark how far we
				 * had gotten when this FR was done.
4006 				 */
4007 				tp1->rec.data.fast_retran_tsn = sending_seq;
4008 			} else {
4009 				/*
				 * If there are chunks on the send queue
				 * (unsent data that has made it from the
				 * stream queues but not out the door), we
				 * take the first one (which will have the
				 * lowest TSN) as the marker for how far we
				 * had gotten when this FR was done.
4016 				 */
4017 				struct sctp_tmit_chunk *ttt;
4018 
4019 				ttt = TAILQ_FIRST(&asoc->send_queue);
4020 				tp1->rec.data.fast_retran_tsn =
4021 				    ttt->rec.data.TSN_seq;
4022 			}
4023 
4024 			if (tp1->do_rtt) {
4025 				/*
				 * this guy had an RTO calculation pending on
4027 				 * it, cancel it
4028 				 */
4029 				tp1->do_rtt = 0;
4030 			}
4031 			if (alt != tp1->whoTo) {
4032 				/* yes, there is an alternate. */
4033 				sctp_free_remote_addr(tp1->whoTo);
4034 				/* sa_ignore FREED_MEMORY */
4035 				tp1->whoTo = alt;
4036 				atomic_add_int(&alt->ref_count, 1);
4037 			}
4038 		}
4039 		tp1 = TAILQ_NEXT(tp1, sctp_next);
4040 	}			/* while (tp1) */
4041 
4042 	if (tot_retrans > 0) {
4043 		/*
		 * Setup the ECN nonce re-sync point. We do this since once
		 * we fast-retransmit something we introduce a Karn's rule
		 * scenario and won't know the totals for the ECN bits.
4047 		 */
4048 		asoc->nonce_resync_tsn = sending_seq;
4049 		asoc->nonce_wait_for_ecne = 0;
4050 		asoc->nonce_sum_check = 0;
4051 	}
4052 }
4053 
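/*
 * The strike logic above is SCTP's analogue of TCP's three duplicate
 * ACKs: each qualifying SACK that misses a TSN bumps tp1->sent once,
 * and when it reaches SCTP_DATAGRAM_RESEND the chunk is queued for
 * fast retransmit. A userland sketch with hypothetical stand-in values
 * (not the kernel's constants); illustrative only.
 */
#if 0
#include <stdio.h>

#define DG_SENT		10	/* stand-in for SCTP_DATAGRAM_SENT */
#define DG_RESEND	13	/* stand-in for SCTP_DATAGRAM_RESEND */

int
main(void)
{
	int sent = DG_SENT;	/* tp1->sent analogue */
	int sack;

	for (sack = 1; sack <= 3; sack++) {
		if (sent < DG_RESEND)
			sent++;	/* one strike per qualifying SACK */
		printf("after SACK %d: sent=%d%s\n", sack, sent,
		    (sent == DG_RESEND) ? " -> fast retransmit" : "");
	}
	return (0);
}
#endif
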
4054 struct sctp_tmit_chunk *
4055 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
4056     struct sctp_association *asoc)
4057 {
4058 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
4059 	struct timeval now;
4060 	int now_filled = 0;
4061 
4062 	if (asoc->peer_supports_prsctp == 0) {
4063 		return (NULL);
4064 	}
4065 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
4066 	while (tp1) {
4067 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
4068 		    tp1->sent != SCTP_DATAGRAM_RESEND) {
4069 			/* no chance to advance, out of here */
4070 			break;
4071 		}
4072 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
4073 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
4074 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
4075 				    asoc->advanced_peer_ack_point,
4076 				    tp1->rec.data.TSN_seq, 0, 0);
4077 			}
4078 		}
4079 		if (!PR_SCTP_ENABLED(tp1->flags)) {
4080 			/*
4081 			 * We can't fwd-tsn past any that are reliable aka
4082 			 * retransmitted until the asoc fails.
4083 			 */
4084 			break;
4085 		}
4086 		if (!now_filled) {
4087 			(void)SCTP_GETTIME_TIMEVAL(&now);
4088 			now_filled = 1;
4089 		}
4090 		tp2 = TAILQ_NEXT(tp1, sctp_next);
4091 		/*
		 * Now we have a chunk which is marked for another
		 * retransmission to a PR-stream but has already run out of
		 * its chances, OR has now been marked to skip. Can we skip
		 * it if it's a resend?
4096 		 */
4097 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
4098 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
4099 			/*
4100 			 * Now is this one marked for resend and its time is
4101 			 * now up?
4102 			 */
4103 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
4104 				/* Yes so drop it */
4105 				if (tp1->data) {
4106 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
4107 					    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
4108 					    SCTP_SO_NOT_LOCKED);
4109 				}
4110 			} else {
4111 				/*
				 * No, we are done when we hit one marked for
				 * resend whose time has not expired.
4114 				 */
4115 				break;
4116 			}
4117 		}
4118 		/*
4119 		 * Ok now if this chunk is marked to drop it we can clean up
4120 		 * the chunk, advance our peer ack point and we can check
4121 		 * the next chunk.
4122 		 */
4123 		if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
4124 			/* advance PeerAckPoint goes forward */
4125 			if (compare_with_wrap(tp1->rec.data.TSN_seq,
4126 			    asoc->advanced_peer_ack_point,
4127 			    MAX_TSN)) {
4128 
4129 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
4130 				a_adv = tp1;
4131 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
4132 				/* No update but we do save the chk */
4133 				a_adv = tp1;
4134 			}
4135 		} else {
4136 			/*
4137 			 * If it is still in RESEND we can advance no
4138 			 * further
4139 			 */
4140 			break;
4141 		}
4142 		/*
4143 		 * If we hit here we just dumped tp1, move to next tsn on
4144 		 * sent queue.
4145 		 */
4146 		tp1 = tp2;
4147 	}
4148 	return (a_adv);
4149 }
4150 
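/*
 * sctp_try_advance_peer_ack_point() above implements the PR-SCTP
 * "advance peer ack point" step (cf. the C1/C3 comments at the call
 * sites): the advanced peer-ack point may slide across consecutive
 * chunks marked FORWARD_TSN_SKIP, but it stops at the first chunk that
 * must still be delivered. A userland sketch with made-up TSNs;
 * illustrative only.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t tsn[] = { 101, 102, 103, 104 };
	int skip[] = { 1, 1, 0, 1 };	/* 103 is still reliable */
	uint32_t adv = 100;		/* cum-ack so far */
	int i;

	for (i = 0; i < 4; i++) {
		if (!skip[i])
			break;		/* may not advance past 103 */
		adv = tsn[i];
	}
	/* A FORWARD-TSN carrying 102 would now be sent to the peer. */
	printf("advanced_peer_ack_point = %u\n", adv);
	return (0);
}
#endif
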
4151 static int
4152 sctp_fs_audit(struct sctp_association *asoc)
4153 {
4154 	struct sctp_tmit_chunk *chk;
4155 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
4156 	int entry_flight, entry_cnt, ret;
4157 
4158 	entry_flight = asoc->total_flight;
4159 	entry_cnt = asoc->total_flight_count;
4160 	ret = 0;
4161 
4162 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
4163 		return (0);
4164 
4165 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
4166 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
4167 			printf("Chk TSN:%u size:%d inflight cnt:%d\n",
4168 			    chk->rec.data.TSN_seq,
4169 			    chk->send_size,
4170 			    chk->snd_count
4171 			    );
4172 			inflight++;
4173 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
4174 			resend++;
4175 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
4176 			inbetween++;
4177 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
4178 			above++;
4179 		} else {
4180 			acked++;
4181 		}
4182 	}
4183 
4184 	if ((inflight > 0) || (inbetween > 0)) {
4185 #ifdef INVARIANTS
4186 		panic("Flight size-express incorrect? \n");
4187 #else
4188 		printf("asoc->total_flight:%d cnt:%d\n",
4189 		    entry_flight, entry_cnt);
4190 
4191 		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
4192 		    inflight, inbetween, resend, above, acked);
4193 		ret = 1;
4194 #endif
4195 	}
4196 	return (ret);
4197 }
4198 
4199 
4200 static void
4201 sctp_window_probe_recovery(struct sctp_tcb *stcb,
4202     struct sctp_association *asoc,
4203     struct sctp_nets *net,
4204     struct sctp_tmit_chunk *tp1)
4205 {
4206 	tp1->window_probe = 0;
4207 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
		/* TSNs skipped; we do NOT move back. */
4209 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
4210 		    tp1->whoTo->flight_size,
4211 		    tp1->book_size,
4212 		    (uintptr_t) tp1->whoTo,
4213 		    tp1->rec.data.TSN_seq);
4214 		return;
4215 	}
4216 	/* First setup this by shrinking flight */
4217 	sctp_flight_size_decrease(tp1);
4218 	sctp_total_flight_decrease(stcb, tp1);
4219 	/* Now mark for resend */
4220 	tp1->sent = SCTP_DATAGRAM_RESEND;
4221 	asoc->sent_queue_retran_cnt++;
4222 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4223 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
4224 		    tp1->whoTo->flight_size,
4225 		    tp1->book_size,
4226 		    (uintptr_t) tp1->whoTo,
4227 		    tp1->rec.data.TSN_seq);
4228 	}
4229 }
4230 
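/*
 * Window probes: when the peer's rwnd is zero we may keep exactly one
 * probe chunk outstanding. If a SACK reopens the window without acking
 * the probe, sctp_window_probe_recovery() above pulls the probe out of
 * the flight and marks it RESEND so the normal retransmission path
 * sends it again. A userland sketch of the flight bookkeeping with
 * made-up sizes; illustrative only.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	int flight_size = 1404;	/* bytes in flight: just the probe */
	int probe_size = 1404;

	flight_size -= probe_size;	/* shrink flight, then mark RESEND */
	printf("flight after probe recovery: %d\n", flight_size);
	return (0);
}
#endif
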
4231 void
4232 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
4233     uint32_t rwnd, int nonce_sum_flag, int *abort_now)
4234 {
4235 	struct sctp_nets *net;
4236 	struct sctp_association *asoc;
4237 	struct sctp_tmit_chunk *tp1, *tp2;
4238 	uint32_t old_rwnd;
4239 	int win_probe_recovery = 0;
4240 	int win_probe_recovered = 0;
4241 	int j, done_once = 0;
4242 
4243 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4244 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
4245 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4246 	}
4247 	SCTP_TCB_LOCK_ASSERT(stcb);
4248 #ifdef SCTP_ASOCLOG_OF_TSNS
4249 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
4250 	stcb->asoc.cumack_log_at++;
4251 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4252 		stcb->asoc.cumack_log_at = 0;
4253 	}
4254 #endif
4255 	asoc = &stcb->asoc;
4256 	old_rwnd = asoc->peers_rwnd;
4257 	if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
4258 		/* old ack */
4259 		return;
4260 	} else if (asoc->last_acked_seq == cumack) {
4261 		/* Window update sack */
4262 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4263 		    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4264 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4265 			/* SWS sender side engages */
4266 			asoc->peers_rwnd = 0;
4267 		}
4268 		if (asoc->peers_rwnd > old_rwnd) {
4269 			goto again;
4270 		}
4271 		return;
4272 	}
4273 	/* First setup for CC stuff */
4274 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4275 		net->prev_cwnd = net->cwnd;
4276 		net->net_ack = 0;
4277 		net->net_ack2 = 0;
4278 
4279 		/*
4280 		 * CMT: Reset CUC and Fast recovery algo variables before
4281 		 * SACK processing
4282 		 */
4283 		net->new_pseudo_cumack = 0;
4284 		net->will_exit_fast_recovery = 0;
4285 	}
4286 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4287 		uint32_t send_s;
4288 
4289 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4290 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4291 			    sctpchunk_listhead);
4292 			send_s = tp1->rec.data.TSN_seq + 1;
4293 		} else {
4294 			send_s = asoc->sending_seq;
4295 		}
4296 		if ((cumack == send_s) ||
4297 		    compare_with_wrap(cumack, send_s, MAX_TSN)) {
4298 #ifndef INVARIANTS
4299 			struct mbuf *oper;
4300 
4301 #endif
4302 #ifdef INVARIANTS
4303 			panic("Impossible sack 1");
4304 #else
4305 			*abort_now = 1;
4306 			/* XXX */
4307 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4308 			    0, M_DONTWAIT, 1, MT_DATA);
4309 			if (oper) {
4310 				struct sctp_paramhdr *ph;
4311 				uint32_t *ippp;
4312 
4313 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4314 				    sizeof(uint32_t);
4315 				ph = mtod(oper, struct sctp_paramhdr *);
4316 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4317 				ph->param_length = htons(SCTP_BUF_LEN(oper));
4318 				ippp = (uint32_t *) (ph + 1);
4319 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4320 			}
4321 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4322 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4323 			return;
4324 #endif
4325 		}
4326 	}
4327 	asoc->this_sack_highest_gap = cumack;
4328 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4329 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4330 		    stcb->asoc.overall_error_count,
4331 		    0,
4332 		    SCTP_FROM_SCTP_INDATA,
4333 		    __LINE__);
4334 	}
4335 	stcb->asoc.overall_error_count = 0;
4336 	if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
4337 		/* process the new consecutive TSN first */
4338 		tp1 = TAILQ_FIRST(&asoc->sent_queue);
4339 		while (tp1) {
4340 			tp2 = TAILQ_NEXT(tp1, sctp_next);
4341 			if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
4342 			    MAX_TSN) ||
4343 			    cumack == tp1->rec.data.TSN_seq) {
4344 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4345 					printf("Warning, an unsent is now acked?\n");
4346 				}
4347 				/*
4348 				 * ECN Nonce: Add the nonce to the sender's
4349 				 * nonce sum
4350 				 */
4351 				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4352 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4353 					/*
4354 					 * If it is less than ACKED, it is
4355 					 * now no-longer in flight. Higher
4356 					 * values may occur during marking
4357 					 */
4358 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4359 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4360 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4361 							    tp1->whoTo->flight_size,
4362 							    tp1->book_size,
4363 							    (uintptr_t) tp1->whoTo,
4364 							    tp1->rec.data.TSN_seq);
4365 						}
4366 						sctp_flight_size_decrease(tp1);
4367 						/* sa_ignore NO_NULL_CHK */
4368 						sctp_total_flight_decrease(stcb, tp1);
4369 					}
4370 					tp1->whoTo->net_ack += tp1->send_size;
4371 					if (tp1->snd_count < 2) {
4372 						/*
						 * True non-retransmitted
4374 						 * chunk
4375 						 */
4376 						tp1->whoTo->net_ack2 +=
4377 						    tp1->send_size;
4378 
4379 						/* update RTO too? */
4380 						if (tp1->do_rtt) {
							/* sa_ignore NO_NULL_CHK */
							tp1->whoTo->RTO =
4386 							    sctp_calculate_rto(stcb,
4387 							    asoc, tp1->whoTo,
4388 							    &tp1->sent_rcv_time,
4389 							    sctp_align_safe_nocopy);
4390 							tp1->do_rtt = 0;
4391 						}
4392 					}
4393 					/*
4394 					 * CMT: CUCv2 algorithm. From the
4395 					 * cumack'd TSNs, for each TSN being
4396 					 * acked for the first time, set the
4397 					 * following variables for the
4398 					 * corresp destination.
4399 					 * new_pseudo_cumack will trigger a
4400 					 * cwnd update.
4401 					 * find_(rtx_)pseudo_cumack will
4402 					 * trigger search for the next
4403 					 * expected (rtx-)pseudo-cumack.
4404 					 */
4405 					tp1->whoTo->new_pseudo_cumack = 1;
4406 					tp1->whoTo->find_pseudo_cumack = 1;
4407 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4408 
4409 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4410 						/* sa_ignore NO_NULL_CHK */
4411 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4412 					}
4413 				}
4414 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4415 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4416 				}
4417 				if (tp1->rec.data.chunk_was_revoked) {
4418 					/* deflate the cwnd */
4419 					tp1->whoTo->cwnd -= tp1->book_size;
4420 					tp1->rec.data.chunk_was_revoked = 0;
4421 				}
4422 				tp1->sent = SCTP_DATAGRAM_ACKED;
4423 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4424 				if (tp1->data) {
4425 					/* sa_ignore NO_NULL_CHK */
4426 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4427 					sctp_m_freem(tp1->data);
4428 				}
4429 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4430 					sctp_log_sack(asoc->last_acked_seq,
4431 					    cumack,
4432 					    tp1->rec.data.TSN_seq,
4433 					    0,
4434 					    0,
4435 					    SCTP_LOG_FREE_SENT);
4436 				}
4437 				tp1->data = NULL;
4438 				asoc->sent_queue_cnt--;
4439 				sctp_free_a_chunk(stcb, tp1);
4440 				tp1 = tp2;
4441 			} else {
4442 				break;
4443 			}
4444 		}
4445 
4446 	}
4447 	/* sa_ignore NO_NULL_CHK */
4448 	if (stcb->sctp_socket) {
4449 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4450 		struct socket *so;
4451 
4452 #endif
4453 
4454 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4455 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4456 			/* sa_ignore NO_NULL_CHK */
4457 			sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
4458 		}
4459 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4460 		so = SCTP_INP_SO(stcb->sctp_ep);
4461 		atomic_add_int(&stcb->asoc.refcnt, 1);
4462 		SCTP_TCB_UNLOCK(stcb);
4463 		SCTP_SOCKET_LOCK(so, 1);
4464 		SCTP_TCB_LOCK(stcb);
4465 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4466 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4467 			/* assoc was freed while we were unlocked */
4468 			SCTP_SOCKET_UNLOCK(so, 1);
4469 			return;
4470 		}
4471 #endif
4472 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4473 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4474 		SCTP_SOCKET_UNLOCK(so, 1);
4475 #endif
4476 	} else {
4477 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4478 			sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
4479 		}
4480 	}
4481 
4482 	/* JRS - Use the congestion control given in the CC module */
4483 	if (asoc->last_acked_seq != cumack)
4484 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4485 
4486 	asoc->last_acked_seq = cumack;
4487 
4488 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4489 		/* nothing left in-flight */
4490 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4491 			net->flight_size = 0;
4492 			net->partial_bytes_acked = 0;
4493 		}
4494 		asoc->total_flight = 0;
4495 		asoc->total_flight_count = 0;
4496 	}
4497 	/* ECN Nonce updates */
4498 	if (asoc->ecn_nonce_allowed) {
4499 		if (asoc->nonce_sum_check) {
4500 			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
4501 				if (asoc->nonce_wait_for_ecne == 0) {
4502 					struct sctp_tmit_chunk *lchk;
4503 
4504 					lchk = TAILQ_FIRST(&asoc->send_queue);
4505 					asoc->nonce_wait_for_ecne = 1;
4506 					if (lchk) {
4507 						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4508 					} else {
4509 						asoc->nonce_wait_tsn = asoc->sending_seq;
4510 					}
4511 				} else {
4512 					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4513 					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4514 						/*
4515 						 * Misbehaving peer. We need
4516 						 * to react to this guy
4517 						 */
4518 						asoc->ecn_allowed = 0;
4519 						asoc->ecn_nonce_allowed = 0;
4520 					}
4521 				}
4522 			}
4523 		} else {
4524 			/* See if Resynchronization Possible */
4525 			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4526 				asoc->nonce_sum_check = 1;
				/*
				 * Now we must calculate what the base is.
				 * We do this based on two things: we know
				 * the totals for all the segments gap-acked
				 * in the SACK (none here), and we also know
				 * the SACK's nonce sum, it's in
				 * nonce_sum_flag. So we can build a truth
				 * table to back-calculate the new value of
				 * asoc->nonce_sum_expect_base:
				 *
				 * SACK-flag-Value   Seg-Sums   Base
				 *        0              0       0
				 *        1              0       1
				 *        0              1       1
				 *        1              1       0
				 */
4541 				asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
4542 			}
4543 		}
4544 	}
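	/*
	 * A worked instance of the truth table above: with no gap-acked
	 * segments the expected base is simply the SACK's nonce-sum flag,
	 * i.e. base = (sack_flag ^ seg_sums) & 1. Userland sketch;
	 * illustrative only.
	 */
#if 0
#include <stdio.h>

int
main(void)
{
	int seg_sums = 0;	/* nonce sum over gap-acked segments */
	int sack_flag;

	for (sack_flag = 0; sack_flag <= 1; sack_flag++)
		printf("sack_flag=%d seg_sums=%d -> base=%d\n",
		    sack_flag, seg_sums, (sack_flag ^ seg_sums) & 1);
	return (0);
}
#endif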
4545 	/* RWND update */
4546 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4547 	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4548 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4549 		/* SWS sender side engages */
4550 		asoc->peers_rwnd = 0;
4551 	}
4552 	if (asoc->peers_rwnd > old_rwnd) {
4553 		win_probe_recovery = 1;
4554 	}
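	/*
	 * The rwnd computation above is sender-side silly-window-syndrome
	 * avoidance: the usable peer window is the advertised a_rwnd minus
	 * what is already in flight (plus a per-chunk overhead allowance),
	 * clamped to zero when it drops below the SWS threshold. Userland
	 * sketch with made-up numbers; illustrative only.
	 */
#if 0
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t rwnd = 7500;		/* a_rwnd from the SACK */
	uint32_t total_flight = 5500;	/* bytes outstanding */
	uint32_t sent_cnt = 4, chunk_oh = 256;	/* overhead allowance */
	uint32_t sws_sender = 1420;	/* SWS threshold */
	uint32_t used = total_flight + sent_cnt * chunk_oh;
	uint32_t peers_rwnd = (rwnd > used) ? rwnd - used : 0;

	if (peers_rwnd < sws_sender)
		peers_rwnd = 0;		/* SWS sender side engages */
	printf("usable peer rwnd: %u\n", peers_rwnd);
	return (0);
}
#endif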
4555 	/* Now assure a timer where data is queued at */
4556 again:
4557 	j = 0;
4558 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4559 		int to_ticks;
4560 
4561 		if (win_probe_recovery && (net->window_probe)) {
4562 			win_probe_recovered = 1;
4563 			/*
			 * Find the first chunk that was used with a window
			 * probe and clear its sent state.
4566 			 */
4567 			/* sa_ignore FREED_MEMORY */
4568 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4569 				if (tp1->window_probe) {
4570 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
4571 					break;
4572 				}
4573 			}
4574 		}
4575 		if (net->RTO == 0) {
4576 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4577 		} else {
4578 			to_ticks = MSEC_TO_TICKS(net->RTO);
4579 		}
4580 		if (net->flight_size) {
4581 			j++;
4582 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4583 			    sctp_timeout_handler, &net->rxt_timer);
4584 			if (net->window_probe) {
4585 				net->window_probe = 0;
4586 			}
4587 		} else {
4588 			if (net->window_probe) {
4589 				/*
4590 				 * In window probes we must assure a timer
4591 				 * is still running there
4592 				 */
4593 				net->window_probe = 0;
4594 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4595 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4596 					    sctp_timeout_handler, &net->rxt_timer);
4597 				}
4598 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4599 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4600 				    stcb, net,
4601 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4602 			}
4603 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4604 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4605 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4606 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4607 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4608 				}
4609 			}
4610 		}
4611 	}
4612 	if ((j == 0) &&
4613 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4614 	    (asoc->sent_queue_retran_cnt == 0) &&
4615 	    (win_probe_recovered == 0) &&
4616 	    (done_once == 0)) {
4617 		/*
4618 		 * huh, this should not happen unless all packets are
4619 		 * PR-SCTP and marked to skip of course.
4620 		 */
4621 		if (sctp_fs_audit(asoc)) {
4622 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4623 				if (net->flight_size) {
4624 					net->flight_size = 0;
4625 				}
4626 			}
4627 			asoc->total_flight = 0;
4628 			asoc->total_flight_count = 0;
4629 			asoc->sent_queue_retran_cnt = 0;
4630 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4631 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4632 					sctp_flight_size_increase(tp1);
4633 					sctp_total_flight_increase(stcb, tp1);
4634 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4635 					asoc->sent_queue_retran_cnt++;
4636 				}
4637 			}
4638 		}
4639 		done_once = 1;
4640 		goto again;
4641 	}
4642 	/**********************************/
4643 	/* Now what about shutdown issues */
4644 	/**********************************/
4645 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4646 		/* nothing left on sendqueue.. consider done */
4647 		/* clean up */
4648 		if ((asoc->stream_queue_cnt == 1) &&
4649 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4650 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4651 		    (asoc->locked_on_sending)
4652 		    ) {
4653 			struct sctp_stream_queue_pending *sp;
4654 
4655 			/*
			 * I may be in a state where we got it all across...
			 * but cannot write more due to a shutdown. We abort
			 * since the user did not indicate EOR in this case.
			 * The sp will be cleaned up during free of the asoc.
4660 			 */
4661 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4662 			    sctp_streamhead);
4663 			if ((sp) && (sp->length == 0)) {
4664 				/* Let cleanup code purge it */
4665 				if (sp->msg_is_complete) {
4666 					asoc->stream_queue_cnt--;
4667 				} else {
4668 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4669 					asoc->locked_on_sending = NULL;
4670 					asoc->stream_queue_cnt--;
4671 				}
4672 			}
4673 		}
4674 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4675 		    (asoc->stream_queue_cnt == 0)) {
4676 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4677 				/* Need to abort here */
4678 				struct mbuf *oper;
4679 
4680 		abort_out_now:
4681 				*abort_now = 1;
4682 				/* XXX */
4683 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4684 				    0, M_DONTWAIT, 1, MT_DATA);
4685 				if (oper) {
4686 					struct sctp_paramhdr *ph;
4687 					uint32_t *ippp;
4688 
4689 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4690 					    sizeof(uint32_t);
4691 					ph = mtod(oper, struct sctp_paramhdr *);
4692 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4693 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4694 					ippp = (uint32_t *) (ph + 1);
4695 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4696 				}
4697 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4698 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4699 			} else {
4700 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4701 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4702 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4703 				}
4704 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4705 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4706 				sctp_stop_timers_for_shutdown(stcb);
4707 				sctp_send_shutdown(stcb,
4708 				    stcb->asoc.primary_destination);
4709 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4710 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4711 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4712 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4713 			}
4714 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4715 		    (asoc->stream_queue_cnt == 0)) {
4716 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4717 				goto abort_out_now;
4718 			}
4719 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4720 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4721 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4722 			sctp_send_shutdown_ack(stcb,
4723 			    stcb->asoc.primary_destination);
4724 
4725 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4726 			    stcb->sctp_ep, stcb, asoc->primary_destination);
4727 		}
4728 	}
4729 	/*********************************************/
4730 	/* Here we perform PR-SCTP procedures        */
4731 	/* (section 4.2)                             */
4732 	/*********************************************/
4733 	/* C1. update advancedPeerAckPoint */
4734 	if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4735 		asoc->advanced_peer_ack_point = cumack;
4736 	}
	/* PR-SCTP issues need to be addressed too */
4738 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4739 		struct sctp_tmit_chunk *lchk;
4740 		uint32_t old_adv_peer_ack_point;
4741 
4742 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4743 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4744 		/* C3. See if we need to send a Fwd-TSN */
4745 		if (compare_with_wrap(asoc->advanced_peer_ack_point, cumack,
4746 		    MAX_TSN)) {
4747 			/*
4748 			 * ISSUE with ECN, see FWD-TSN processing for notes
4749 			 * on issues that will occur when the ECN NONCE
4750 			 * stuff is put into SCTP for cross checking.
4751 			 */
4752 			if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
4753 			    MAX_TSN)) {
4754 				send_forward_tsn(stcb, asoc);
4755 				/*
4756 				 * ECN Nonce: Disable Nonce Sum check when
4757 				 * FWD TSN is sent and store resync tsn
4758 				 */
4759 				asoc->nonce_sum_check = 0;
4760 				asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
4761 			} else if (lchk) {
4762 				/* try to FR fwd-tsn's that get lost too */
4763 				lchk->rec.data.fwd_tsn_cnt++;
4764 				if (lchk->rec.data.fwd_tsn_cnt > 3) {
4765 					send_forward_tsn(stcb, asoc);
4766 					lchk->rec.data.fwd_tsn_cnt = 0;
4767 				}
4768 			}
4769 		}
4770 		if (lchk) {
4771 			/* Assure a timer is up */
4772 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4773 			    stcb->sctp_ep, stcb, lchk->whoTo);
4774 		}
4775 	}
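	/*
	 * Illustration (hypothetical TSNs): if cumack is 100 and the
	 * PR-SCTP chunks 101-103 have all expired, the advance above
	 * moves advanced_peer_ack_point to 103; since that is beyond
	 * both cumack and the old ack point, a FWD-TSN announcing 103
	 * is sent so the peer can skip over the missing data.
	 */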
4776 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4777 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4778 		    rwnd,
4779 		    stcb->asoc.peers_rwnd,
4780 		    stcb->asoc.total_flight,
4781 		    stcb->asoc.total_output_queue_size);
4782 	}
4783 }
4784 
4785 void
4786 sctp_handle_sack(struct mbuf *m, int offset,
4787     struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
4788     struct sctp_nets *net_from, int *abort_now, int sack_len, uint32_t rwnd)
4789 {
4790 	struct sctp_association *asoc;
4791 	struct sctp_sack *sack;
4792 	struct sctp_tmit_chunk *tp1, *tp2;
4793 	uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
4794 	         this_sack_lowest_newack;
4795 	uint32_t sav_cum_ack;
4796 	uint16_t num_seg, num_dup;
4797 	uint16_t wake_him = 0;
4798 	unsigned int sack_length;
4799 	uint32_t send_s = 0;
4800 	long j;
4801 	int accum_moved = 0;
4802 	int will_exit_fast_recovery = 0;
4803 	uint32_t a_rwnd, old_rwnd;
4804 	int win_probe_recovery = 0;
4805 	int win_probe_recovered = 0;
4806 	struct sctp_nets *net = NULL;
4807 	int nonce_sum_flag, ecn_seg_sums = 0;
4808 	int done_once;
4809 	uint8_t reneged_all = 0;
4810 	uint8_t cmt_dac_flag;
4811 
4812 	/*
4813 	 * we take any chance we can to service our queues since we cannot
4814 	 * get awoken when the socket is read from :<
4815 	 */
4816 	/*
4817 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4818 	 * old sack, if so discard. 2) If there is nothing left in the send
4819 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4820 	 * too, update any rwnd change and verify no timers are running,
4821 	 * then return. 3) Process any new consecutive data, i.e. cum-ack
4822 	 * moved; process these first and note that it moved. 4) Process any
4823 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4824 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4825 	 * sync up flightsizes and things, stop all timers and also check
4826 	 * for shutdown_pending state. If so then go ahead and send off the
4827 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4828 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4829 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4830 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4831 	 * if in shutdown_recv state.
4832 	 */
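	/*
	 * Note that TSN comparisons throughout use serial-number
	 * arithmetic (compare_with_wrap() with MAX_TSN), so e.g. TSN 5
	 * counts as newer than TSN 0xFFFFFFFB across a wrap.
	 */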
4833 	SCTP_TCB_LOCK_ASSERT(stcb);
4834 	sack = &ch->sack;
4835 	/* CMT DAC algo */
4836 	this_sack_lowest_newack = 0;
4837 	j = 0;
4838 	sack_length = (unsigned int)sack_len;
4839 	/* ECN Nonce */
4840 	SCTP_STAT_INCR(sctps_slowpath_sack);
4841 	nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
4842 	cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
4843 #ifdef SCTP_ASOCLOG_OF_TSNS
4844 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4845 	stcb->asoc.cumack_log_at++;
4846 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4847 		stcb->asoc.cumack_log_at = 0;
4848 	}
4849 #endif
4850 	num_seg = ntohs(sack->num_gap_ack_blks);
4851 	a_rwnd = rwnd;
4852 
4853 	/* CMT DAC algo */
4854 	cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
4855 	num_dup = ntohs(sack->num_dup_tsns);
4856 
4857 	old_rwnd = stcb->asoc.peers_rwnd;
4858 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4859 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4860 		    stcb->asoc.overall_error_count,
4861 		    0,
4862 		    SCTP_FROM_SCTP_INDATA,
4863 		    __LINE__);
4864 	}
4865 	stcb->asoc.overall_error_count = 0;
4866 	asoc = &stcb->asoc;
4867 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4868 		sctp_log_sack(asoc->last_acked_seq,
4869 		    cum_ack,
4870 		    0,
4871 		    num_seg,
4872 		    num_dup,
4873 		    SCTP_LOG_NEW_SACK);
4874 	}
4875 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
4876 		int off_to_dup, iii;
4877 		uint32_t *dupdata, dblock;
4878 
4879 		off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk);
4880 		if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= sack_length) {
4881 			dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
4882 			    sizeof(uint32_t), (uint8_t *) & dblock);
4883 			off_to_dup += sizeof(uint32_t);
4884 			if (dupdata) {
4885 				for (iii = 0; iii < num_dup; iii++) {
4886 					sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4887 					dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
4888 					    sizeof(uint32_t), (uint8_t *) & dblock);
4889 					if (dupdata == NULL)
4890 						break;
4891 					off_to_dup += sizeof(uint32_t);
4892 				}
4893 			}
4894 		} else {
4895 			SCTP_PRINTF("Size invalid offset to dups:%d number dups:%d sack_len:%d num gaps:%d\n",
4896 			    off_to_dup, num_dup, sack_length, num_seg);
4897 		}
4898 	}
4899 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4900 		/* reality check */
4901 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4902 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4903 			    sctpchunk_listhead);
4904 			send_s = tp1->rec.data.TSN_seq + 1;
4905 		} else {
4906 			send_s = asoc->sending_seq;
4907 		}
4908 		if (cum_ack == send_s ||
4909 		    compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
4910 #ifndef INVARIANTS
4911 			struct mbuf *oper;
4912 
4913 #endif
4914 #ifdef INVARIANTS
4915 	hopeless_peer:
4916 			panic("Impossible sack 1");
4917 #else
4918 
4919 
4920 			/*
4921 			 * no way, we have not even sent this TSN out yet.
4922 			 * Peer is hopelessly messed up with us.
4923 			 */
4924 	hopeless_peer:
4925 			*abort_now = 1;
4926 			/* XXX */
4927 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4928 			    0, M_DONTWAIT, 1, MT_DATA);
4929 			if (oper) {
4930 				struct sctp_paramhdr *ph;
4931 				uint32_t *ippp;
4932 
4933 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4934 				    sizeof(uint32_t);
4935 				ph = mtod(oper, struct sctp_paramhdr *);
4936 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4937 				ph->param_length = htons(SCTP_BUF_LEN(oper));
4938 				ippp = (uint32_t *) (ph + 1);
4939 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4940 			}
4941 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4942 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4943 			return;
4944 #endif
4945 		}
4946 	}
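	/*
	 * Example of the check above: if the highest TSN ever placed on
	 * the wire was 199, send_s is 200; a SACK whose cum-ack is 200
	 * or beyond would acknowledge data we never sent, hence the
	 * abort (or the panic under INVARIANTS).
	 */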
4947 	/**********************/
4948 	/* 1) check the range */
4949 	/**********************/
4950 	if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
4951 		/* acking something behind */
4952 		return;
4953 	}
4954 	sav_cum_ack = asoc->last_acked_seq;
4955 
4956 	/* update the Rwnd of the peer */
4957 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4958 	    TAILQ_EMPTY(&asoc->send_queue) &&
4959 	    (asoc->stream_queue_cnt == 0)
4960 	    ) {
4961 		/* nothing left on send/sent and strmq */
4962 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4963 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4964 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4965 		}
4966 		asoc->peers_rwnd = a_rwnd;
4967 		if (asoc->sent_queue_retran_cnt) {
4968 			asoc->sent_queue_retran_cnt = 0;
4969 		}
4970 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4971 			/* SWS sender side engages */
4972 			asoc->peers_rwnd = 0;
4973 		}
4974 		/* stop any timers */
4975 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4976 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4977 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4978 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4979 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4980 					SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
4981 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4982 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4983 				}
4984 			}
4985 			net->partial_bytes_acked = 0;
4986 			net->flight_size = 0;
4987 		}
4988 		asoc->total_flight = 0;
4989 		asoc->total_flight_count = 0;
4990 		return;
4991 	}
4992 	/*
4993 	 * We init net_ack and net_ack2 to 0. These are used to track two
4994 	 * things: the total byte count acked is tracked in net_ack, AND
4995 	 * net_ack2 tracks the total bytes acked that are unambiguous and
4996 	 * were never retransmitted. We track these on a per destination
4997 	 * address basis.
4998 	 */
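	/*
	 * For example, a 1000-byte chunk acked on its first
	 * transmission adds 1000 to both net_ack and net_ack2 of its
	 * destination; had it been retransmitted, only net_ack would
	 * grow, since an ack for a retransmitted chunk is ambiguous for
	 * RTT measurement (Karn's rule) and similar accounting.
	 */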
4999 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5000 		net->prev_cwnd = net->cwnd;
5001 		net->net_ack = 0;
5002 		net->net_ack2 = 0;
5003 
5004 		/*
5005 		 * CMT: Reset CUC and Fast recovery algo variables before
5006 		 * SACK processing
5007 		 */
5008 		net->new_pseudo_cumack = 0;
5009 		net->will_exit_fast_recovery = 0;
5010 	}
5011 	/* process the new consecutive TSN first */
5012 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
5013 	while (tp1) {
5014 		if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
5015 		    MAX_TSN) ||
5016 		    last_tsn == tp1->rec.data.TSN_seq) {
5017 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
5018 				/*
5019 				 * ECN Nonce: Add the nonce to the sender's
5020 				 * nonce sum
5021 				 */
5022 				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
5023 				accum_moved = 1;
5024 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
5025 					/*
5026 					 * If it is less than ACKED, it is
5027 					 * now no-longer in flight. Higher
5028 					 * values may occur during marking
5029 					 */
5030 					if ((tp1->whoTo->dest_state &
5031 					    SCTP_ADDR_UNCONFIRMED) &&
5032 					    (tp1->snd_count < 2)) {
5033 						/*
5034 						 * If there was no retran,
5035 						 * the address is
5036 						 * un-confirmed, and we sent
5037 						 * to it and are now
5038 						 * sacked... it's confirmed;
5039 						 * mark it so.
5040 						 */
5041 						tp1->whoTo->dest_state &=
5042 						    ~SCTP_ADDR_UNCONFIRMED;
5043 					}
5044 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5045 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
5046 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
5047 							    tp1->whoTo->flight_size,
5048 							    tp1->book_size,
5049 							    (uintptr_t) tp1->whoTo,
5050 							    tp1->rec.data.TSN_seq);
5051 						}
5052 						sctp_flight_size_decrease(tp1);
5053 						sctp_total_flight_decrease(stcb, tp1);
5054 					}
5055 					tp1->whoTo->net_ack += tp1->send_size;
5056 
5057 					/* CMT SFR and DAC algos */
5058 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
5059 					tp1->whoTo->saw_newack = 1;
5060 
5061 					if (tp1->snd_count < 2) {
5062 						/*
5063 						 * True non-retransmitted
5064 						 * chunk
5065 						 */
5066 						tp1->whoTo->net_ack2 +=
5067 						    tp1->send_size;
5068 
5069 						/* update RTO too? */
5070 						if (tp1->do_rtt) {
5071 							tp1->whoTo->RTO =
5072 							    sctp_calculate_rto(stcb,
5073 							    asoc, tp1->whoTo,
5074 							    &tp1->sent_rcv_time,
5075 							    sctp_align_safe_nocopy);
5076 							tp1->do_rtt = 0;
5077 						}
5078 					}
5079 					/*
5080 					 * CMT: CUCv2 algorithm. From the
5081 					 * cumack'd TSNs, for each TSN being
5082 					 * acked for the first time, set the
5083 					 * following variables for the
5084 					 * corresp destination.
5085 					 * new_pseudo_cumack will trigger a
5086 					 * cwnd update.
5087 					 * find_(rtx_)pseudo_cumack will
5088 					 * trigger search for the next
5089 					 * expected (rtx-)pseudo-cumack.
5090 					 */
5091 					tp1->whoTo->new_pseudo_cumack = 1;
5092 					tp1->whoTo->find_pseudo_cumack = 1;
5093 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
5094 
5095 
5096 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
5097 						sctp_log_sack(asoc->last_acked_seq,
5098 						    cum_ack,
5099 						    tp1->rec.data.TSN_seq,
5100 						    0,
5101 						    0,
5102 						    SCTP_LOG_TSN_ACKED);
5103 					}
5104 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
5105 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
5106 					}
5107 				}
5108 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5109 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
5110 #ifdef SCTP_AUDITING_ENABLED
5111 					sctp_audit_log(0xB3,
5112 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
5113 #endif
5114 				}
5115 				if (tp1->rec.data.chunk_was_revoked) {
5116 					/* deflate the cwnd */
5117 					tp1->whoTo->cwnd -= tp1->book_size;
5118 					tp1->rec.data.chunk_was_revoked = 0;
5119 				}
5120 				tp1->sent = SCTP_DATAGRAM_ACKED;
5121 			}
5122 		} else {
5123 			break;
5124 		}
5125 		tp1 = TAILQ_NEXT(tp1, sctp_next);
5126 	}
	}
5127 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
5128 	/* always set this up to cum-ack */
5129 	asoc->this_sack_highest_gap = last_tsn;
5130 
5131 	/* Move offset up to point to gaps/dups */
5132 	offset += sizeof(struct sctp_sack_chunk);
5133 	if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_sack_chunk)) > sack_length) {
5134 
5135 		/* skip corrupt segments */
5136 		goto skip_segments;
5137 	}
5138 	if (num_seg > 0) {
5139 
5140 		/*
5141 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
5142 		 * to be greater than the cumack. Also reset saw_newack to 0
5143 		 * for all dests.
5144 		 */
5145 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5146 			net->saw_newack = 0;
5147 			net->this_sack_highest_newack = last_tsn;
5148 		}
5149 
5150 		/*
5151 		 * thisSackHighestGap will increase while handling NEW
5152 		 * segments this_sack_highest_newack will increase while
5153 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
5154 		 * used for CMT DAC algo. saw_newack will also change.
5155 		 */
5156 		sctp_handle_segments(m, &offset, stcb, asoc, ch, last_tsn,
5157 		    &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
5158 		    num_seg, &ecn_seg_sums);
5159 
5160 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
5161 			/*
5162 			 * validate the biggest_tsn_acked in the gap acks if
5163 			 * strict adherence is wanted.
5164 			 */
5165 			if ((biggest_tsn_acked == send_s) ||
5166 			    (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
5167 				/*
5168 				 * peer is either confused or we are under
5169 				 * attack. We must abort.
5170 				 */
5171 				goto hopeless_peer;
5172 			}
5173 		}
5174 	}
5175 skip_segments:
5176 	/*******************************************/
5177 	/* cancel ALL T3-send timer if accum moved */
5178 	/*******************************************/
5179 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
5180 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5181 			if (net->new_pseudo_cumack)
5182 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5183 				    stcb, net,
5184 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
5185 
5186 		}
5187 	} else {
5188 		if (accum_moved) {
5189 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5190 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5191 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
5192 			}
5193 		}
5194 	}
5195 	/********************************************/
5196 	/* drop the acked chunks from the sendqueue */
5197 	/********************************************/
5198 	asoc->last_acked_seq = cum_ack;
5199 
5200 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
5201 	if (tp1 == NULL)
5202 		goto done_with_it;
5203 	do {
5204 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
5205 		    MAX_TSN)) {
5206 			break;
5207 		}
5208 		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
5209 			/* no more sent on list */
5210 			printf("Warning, tp1->sent == %d and it's now acked?\n",
5211 			    tp1->sent);
5212 		}
5213 		tp2 = TAILQ_NEXT(tp1, sctp_next);
5214 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
5215 		if (tp1->pr_sctp_on) {
5216 			if (asoc->pr_sctp_cnt != 0)
5217 				asoc->pr_sctp_cnt--;
5218 		}
5219 		if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
5220 		    (asoc->total_flight > 0)) {
5221 #ifdef INVARIANTS
5222 			panic("Warning flight size is positive and should be 0");
5223 #else
5224 			SCTP_PRINTF("Warning flight size incorrect, should be 0 but is %d\n",
5225 			    asoc->total_flight);
5226 #endif
5227 			asoc->total_flight = 0;
5228 		}
5229 		if (tp1->data) {
5230 			/* sa_ignore NO_NULL_CHK */
5231 			sctp_free_bufspace(stcb, asoc, tp1, 1);
5232 			sctp_m_freem(tp1->data);
5233 			if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
5234 				asoc->sent_queue_cnt_removeable--;
5235 			}
5236 		}
5237 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
5238 			sctp_log_sack(asoc->last_acked_seq,
5239 			    cum_ack,
5240 			    tp1->rec.data.TSN_seq,
5241 			    0,
5242 			    0,
5243 			    SCTP_LOG_FREE_SENT);
5244 		}
5245 		tp1->data = NULL;
5246 		asoc->sent_queue_cnt--;
5247 		sctp_free_a_chunk(stcb, tp1);
5248 		wake_him++;
5249 		tp1 = tp2;
5250 	} while (tp1 != NULL);
5251 
5252 done_with_it:
5253 	/* sa_ignore NO_NULL_CHK */
5254 	if ((wake_him) && (stcb->sctp_socket)) {
5255 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5256 		struct socket *so;
5257 
5258 #endif
5259 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
5260 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
5261 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
5262 		}
5263 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5264 		so = SCTP_INP_SO(stcb->sctp_ep);
5265 		atomic_add_int(&stcb->asoc.refcnt, 1);
5266 		SCTP_TCB_UNLOCK(stcb);
5267 		SCTP_SOCKET_LOCK(so, 1);
5268 		SCTP_TCB_LOCK(stcb);
5269 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
5270 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
5271 			/* assoc was freed while we were unlocked */
5272 			SCTP_SOCKET_UNLOCK(so, 1);
5273 			return;
5274 		}
5275 #endif
5276 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
5277 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5278 		SCTP_SOCKET_UNLOCK(so, 1);
5279 #endif
5280 	} else {
5281 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
5282 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
5283 		}
5284 	}
5285 
5286 	if (asoc->fast_retran_loss_recovery && accum_moved) {
5287 		if (compare_with_wrap(asoc->last_acked_seq,
5288 		    asoc->fast_recovery_tsn, MAX_TSN) ||
5289 		    asoc->last_acked_seq == asoc->fast_recovery_tsn) {
5290 			/* Setup so we will exit RFC2582 fast recovery */
5291 			will_exit_fast_recovery = 1;
5292 		}
5293 	}
5294 	/*
5295 	 * Check for revoked fragments:
5296 	 *
5297 	 * If the previous sack had no frags then we cannot have any revoked.
5298 	 * If the previous sack did have frags then: if we now have frags
5299 	 * (num_seg > 0), call sctp_check_for_revoked() to tell if the peer
5300 	 * revoked some of them; else the peer revoked all ACKED fragments,
5301 	 * since we had some before and now we have NONE.
5302 	 */
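	/*
	 * E.g. if the previous SACK gap-acked TSNs 110-112 and this one
	 * carries no gap blocks at all, every chunk above the cum-ack
	 * that was previously marked acked is put back in flight below.
	 */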
5303 
5304 	if (num_seg)
5305 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
5306 	else if (asoc->saw_sack_with_frags) {
5307 		int cnt_revoked = 0;
5308 
5309 		tp1 = TAILQ_FIRST(&asoc->sent_queue);
5310 		if (tp1 != NULL) {
5311 			/* Peer revoked all dg's marked or acked */
5312 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5313 				if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
5314 				    (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
5315 					tp1->sent = SCTP_DATAGRAM_SENT;
5316 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
5317 						sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
5318 						    tp1->whoTo->flight_size,
5319 						    tp1->book_size,
5320 						    (uintptr_t) tp1->whoTo,
5321 						    tp1->rec.data.TSN_seq);
5322 					}
5323 					sctp_flight_size_increase(tp1);
5324 					sctp_total_flight_increase(stcb, tp1);
5325 					tp1->rec.data.chunk_was_revoked = 1;
5326 					/*
5327 					 * To ensure that this increase in
5328 					 * flightsize, which is artificial,
5329 					 * does not throttle the sender, we
5330 					 * also increase the cwnd
5331 					 * artificially.
5332 					 */
5333 					tp1->whoTo->cwnd += tp1->book_size;
5334 					cnt_revoked++;
5335 				}
5336 			}
5337 			if (cnt_revoked) {
5338 				reneged_all = 1;
5339 			}
5340 		}
5341 		asoc->saw_sack_with_frags = 0;
5342 	}
5343 	if (num_seg)
5344 		asoc->saw_sack_with_frags = 1;
5345 	else
5346 		asoc->saw_sack_with_frags = 0;
5347 
5348 	/* JRS - Use the congestion control given in the CC module */
5349 	asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
5350 
5351 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
5352 		/* nothing left in-flight */
5353 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5354 			/* stop all timers */
5355 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5356 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5357 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5358 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5359 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
5360 				}
5361 			}
5362 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5363 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
5364 			net->flight_size = 0;
5365 			net->partial_bytes_acked = 0;
5366 		}
5367 		asoc->total_flight = 0;
5368 		asoc->total_flight_count = 0;
5369 	}
5370 	/**********************************/
5371 	/* Now what about shutdown issues */
5372 	/**********************************/
5373 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5374 		/* nothing left on sendqueue.. consider done */
5375 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5376 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5377 			    asoc->peers_rwnd, 0, 0, a_rwnd);
5378 		}
5379 		asoc->peers_rwnd = a_rwnd;
5380 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5381 			/* SWS sender side engages */
5382 			asoc->peers_rwnd = 0;
5383 		}
5384 		/* clean up */
5385 		if ((asoc->stream_queue_cnt == 1) &&
5386 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5387 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5388 		    (asoc->locked_on_sending)
5389 		    ) {
5390 			struct sctp_stream_queue_pending *sp;
5391 
5392 			/*
5393 			 * I may be in a state where we got all across.. but
5394 			 * cannot write more due to a shutdown... we abort
5395 			 * since the user did not indicate EOR in this case.
5396 			 */
5397 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
5398 			    sctp_streamhead);
5399 			if ((sp) && (sp->length == 0)) {
5400 				asoc->locked_on_sending = NULL;
5401 				if (sp->msg_is_complete) {
5402 					asoc->stream_queue_cnt--;
5403 				} else {
5404 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
5405 					asoc->stream_queue_cnt--;
5406 				}
5407 			}
5408 		}
5409 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5410 		    (asoc->stream_queue_cnt == 0)) {
5411 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5412 				/* Need to abort here */
5413 				struct mbuf *oper;
5414 
5415 		abort_out_now:
5416 				*abort_now = 1;
5417 				/* XXX */
5418 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
5419 				    0, M_DONTWAIT, 1, MT_DATA);
5420 				if (oper) {
5421 					struct sctp_paramhdr *ph;
5422 					uint32_t *ippp;
5423 
5424 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5425 					    sizeof(uint32_t);
5426 					ph = mtod(oper, struct sctp_paramhdr *);
5427 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
5428 					ph->param_length = htons(SCTP_BUF_LEN(oper));
5429 					ippp = (uint32_t *) (ph + 1);
5430 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
5431 				}
5432 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
5433 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
5434 				return;
5435 			} else {
5436 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5437 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5438 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5439 				}
5440 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5441 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5442 				sctp_stop_timers_for_shutdown(stcb);
5443 				sctp_send_shutdown(stcb,
5444 				    stcb->asoc.primary_destination);
5445 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5446 				    stcb->sctp_ep, stcb, asoc->primary_destination);
5447 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5448 				    stcb->sctp_ep, stcb, asoc->primary_destination);
5449 			}
5450 			return;
5451 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5452 		    (asoc->stream_queue_cnt == 0)) {
5453 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5454 				goto abort_out_now;
5455 			}
5456 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5457 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5458 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5459 			sctp_send_shutdown_ack(stcb,
5460 			    stcb->asoc.primary_destination);
5461 
5462 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5463 			    stcb->sctp_ep, stcb, asoc->primary_destination);
5464 			return;
5465 		}
5466 	}
5467 	/*
5468 	 * Now here we are going to recycle net_ack for a different use...
5469 	 * HEADS UP.
5470 	 */
5471 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5472 		net->net_ack = 0;
5473 	}
5474 
5475 	/*
5476 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5477 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5478 	 * automatically ensure that.
5479 	 */
5480 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
5481 		this_sack_lowest_newack = cum_ack;
5482 	}
5483 	if (num_seg > 0) {
5484 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5485 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5486 	}
5487 	/* JRS - Use the congestion control given in the CC module */
5488 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5489 
5490 	/******************************************************************
5491 	 *  Here we do the stuff with ECN Nonce checking.
5492 	 *  We basically check to see if the nonce sum flag was incorrect
5493 	 *  or if resynchronization needs to be done. Also if we catch a
5494 	 *  misbehaving receiver we give him the kick.
5495 	 ******************************************************************/
5496 
5497 	if (asoc->ecn_nonce_allowed) {
5498 		if (asoc->nonce_sum_check) {
5499 			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
5500 				if (asoc->nonce_wait_for_ecne == 0) {
5501 					struct sctp_tmit_chunk *lchk;
5502 
5503 					lchk = TAILQ_FIRST(&asoc->send_queue);
5504 					asoc->nonce_wait_for_ecne = 1;
5505 					if (lchk) {
5506 						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
5507 					} else {
5508 						asoc->nonce_wait_tsn = asoc->sending_seq;
5509 					}
5510 				} else {
5511 					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
5512 					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
5513 						/*
5514 						 * Misbehaving peer. We need
5515 						 * to react to this guy
5516 						 */
5517 						asoc->ecn_allowed = 0;
5518 						asoc->ecn_nonce_allowed = 0;
5519 					}
5520 				}
5521 			}
5522 		} else {
5523 			/* See if Resynchronization Possible */
5524 			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
5525 				asoc->nonce_sum_check = 1;
5526 				/*
5527 				 * now we must calculate what the base is.
5528 				 * We do this based on two things: we know the
5529 				 * total of the nonces for all segments
5530 				 * gap-acked in the SACK (it's in ecn_seg_sums)
5531 				 * and the SACK's own nonce sum flag, so we can
5532 				 * build a truth table to back-calculate the
5533 				 * new value of asoc->nonce_sum_expect_base:
5534 				 *
5535 				 * SACK-flag-Value   Seg-Sums   Base
5536 				 *        0             0        0
5537 				 *        1             0        1
5538 				 *        0             1        1
5539 				 *        1             1        0
5540 				 */
5541 				asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
5542 			}
5543 		}
5544 	}
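	/*
	 * E.g. if a resyncing SACK carries nonce flag 1 while our
	 * summed segment nonces are 0, the XOR above re-seeds
	 * nonce_sum_expect_base to 1, matching the second row of the
	 * truth table.
	 */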
5545 	/* Now are we exiting loss recovery ? */
5546 	if (will_exit_fast_recovery) {
5547 		/* Ok, we must exit fast recovery */
5548 		asoc->fast_retran_loss_recovery = 0;
5549 	}
5550 	if ((asoc->sat_t3_loss_recovery) &&
5551 	    ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
5552 	    MAX_TSN) ||
5553 	    (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
5554 		/* end satellite t3 loss recovery */
5555 		asoc->sat_t3_loss_recovery = 0;
5556 	}
5557 	/*
5558 	 * CMT Fast recovery
5559 	 */
5560 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5561 		if (net->will_exit_fast_recovery) {
5562 			/* Ok, we must exit fast recovery */
5563 			net->fast_retran_loss_recovery = 0;
5564 		}
5565 	}
5566 
5567 	/* Adjust and set the new rwnd value */
5568 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5569 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5570 		    asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5571 	}
5572 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5573 	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5574 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5575 		/* SWS sender side engages */
5576 		asoc->peers_rwnd = 0;
5577 	}
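	/*
	 * Worked example (hypothetical numbers): a_rwnd = 10000 with
	 * 6000 bytes in flight and 4 chunks on the sent queue at a
	 * configured per-chunk overhead of 256 leaves 10000 - (6000 +
	 * 1024) = 2976 bytes of usable peer window; a result under the
	 * SWS threshold would have been clamped to 0 above.
	 */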
5578 	if (asoc->peers_rwnd > old_rwnd) {
5579 		win_probe_recovery = 1;
5580 	}
5581 	/*
5582 	 * Now we must setup so we have a timer up for anyone with
5583 	 * outstanding data.
5584 	 */
5585 	done_once = 0;
5586 again:
5587 	j = 0;
5588 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5589 		if (win_probe_recovery && (net->window_probe)) {
5590 			win_probe_recovered = 1;
5591 			/*-
5592 			 * Find first chunk that was used with
5593 			 * window probe and clear the event. Put
5594 			 * it back into the send queue as if it has
5595 			 * not been sent.
5596 			 */
5597 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5598 				if (tp1->window_probe) {
5599 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
5600 					break;
5601 				}
5602 			}
5603 		}
5604 		if (net->flight_size) {
5605 			j++;
5606 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5607 			    stcb->sctp_ep, stcb, net);
5610 		} else {
5611 			if (net->window_probe) {
5612 				/*
5613 				 * In window probes we must assure a timer
5614 				 * is still running there
5615 				 */
5616 
5617 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5618 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5619 					    stcb->sctp_ep, stcb, net);
5620 
5621 				}
5622 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5623 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5624 				    stcb, net,
5625 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5626 			}
5627 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5628 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5629 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5630 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5631 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
5632 				}
5633 			}
5634 		}
5635 	}
5636 	if ((j == 0) &&
5637 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5638 	    (asoc->sent_queue_retran_cnt == 0) &&
5639 	    (win_probe_recovered == 0) &&
5640 	    (done_once == 0)) {
5641 		/*
5642 		 * huh, this should not happen unless all packets are
5643 		 * PR-SCTP and marked to skip of course.
5644 		 */
5645 		if (sctp_fs_audit(asoc)) {
5646 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5647 				net->flight_size = 0;
5648 			}
5649 			asoc->total_flight = 0;
5650 			asoc->total_flight_count = 0;
5651 			asoc->sent_queue_retran_cnt = 0;
5652 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5653 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5654 					sctp_flight_size_increase(tp1);
5655 					sctp_total_flight_increase(stcb, tp1);
5656 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5657 					asoc->sent_queue_retran_cnt++;
5658 				}
5659 			}
5660 		}
5661 		done_once = 1;
5662 		goto again;
5663 	}
5664 	/* Fix up the a-p-a-p for future PR-SCTP sends */
5665 	if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
5666 		asoc->advanced_peer_ack_point = cum_ack;
5667 	}
5668 	/* C2. try to further move advancedPeerAckPoint ahead */
5669 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5670 		struct sctp_tmit_chunk *lchk;
5671 		uint32_t old_adv_peer_ack_point;
5672 
5673 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5674 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5675 		/* C3. See if we need to send a Fwd-TSN */
5676 		if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
5677 		    MAX_TSN)) {
5678 			/*
5679 			 * ISSUE with ECN, see FWD-TSN processing for notes
5680 			 * on issues that will occur when the ECN NONCE
5681 			 * stuff is put into SCTP for cross checking.
5682 			 */
5683 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5684 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5685 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5686 				    old_adv_peer_ack_point);
5687 			}
5688 			if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
5689 			    MAX_TSN)) {
5690 				send_forward_tsn(stcb, asoc);
5691 				/*
5692 				 * ECN Nonce: Disable Nonce Sum check when
5693 				 * FWD TSN is sent and store resync tsn
5694 				 */
5695 				asoc->nonce_sum_check = 0;
5696 				asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
5697 			} else if (lchk) {
5698 				/* try to FR fwd-tsn's that get lost too */
5699 				lchk->rec.data.fwd_tsn_cnt++;
5700 				if (lchk->rec.data.fwd_tsn_cnt > 3) {
5701 					send_forward_tsn(stcb, asoc);
5702 					lchk->rec.data.fwd_tsn_cnt = 0;
5703 				}
5704 			}
5705 		}
5706 		if (lchk) {
5707 			/* Assure a timer is up */
5708 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5709 			    stcb->sctp_ep, stcb, lchk->whoTo);
5710 		}
5711 	}
5712 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5713 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5714 		    a_rwnd,
5715 		    stcb->asoc.peers_rwnd,
5716 		    stcb->asoc.total_flight,
5717 		    stcb->asoc.total_output_queue_size);
5718 	}
5719 }
5720 
5721 void
5722 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5723     struct sctp_nets *netp, int *abort_flag)
5724 {
5725 	/* Copy cum-ack */
5726 	uint32_t cum_ack, a_rwnd;
5727 
5728 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5729 	/* Arrange so a_rwnd does NOT change */
5730 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
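	/*
	 * The express handler re-derives peers_rwnd as (roughly) a_rwnd
	 * minus the bytes still in flight, so passing peers_rwnd +
	 * total_flight here leaves the stored window value unchanged.
	 */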
5731 
5732 	/* Now call the express sack handling */
5733 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
5734 }
5735 
5736 static void
5737 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5738     struct sctp_stream_in *strmin)
5739 {
5740 	struct sctp_queued_to_read *ctl, *nctl;
5741 	struct sctp_association *asoc;
5742 	int tt;
5743 
5744 	/* EY -used to calculate nr_gap information */
5745 	uint32_t nr_tsn, nr_gap;
5746 
5747 	asoc = &stcb->asoc;
5748 	tt = strmin->last_sequence_delivered;
5749 	/*
5750 	 * First deliver anything prior to and including the stream
5751 	 * sequence number that came in.
5752 	 */
5753 	ctl = TAILQ_FIRST(&strmin->inqueue);
5754 	while (ctl) {
5755 		nctl = TAILQ_NEXT(ctl, next);
5756 		if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
5757 		    (tt == ctl->sinfo_ssn)) {
5758 			/* this is deliverable now */
5759 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5760 			/* subtract pending on streams */
5761 			asoc->size_on_all_streams -= ctl->length;
5762 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5763 			/* deliver it to at least the delivery-q */
5764 			if (stcb->sctp_socket) {
5765 				/* EY need the tsn info for calculating nr */
5766 				nr_tsn = ctl->sinfo_tsn;
5767 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5768 				    ctl,
5769 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5770 				/*
5771 				 * EY - this is the chunk that should be
5772 				 * tagged NR; calculate the gap and
5773 				 * then tag this TSN as NR
5774 				 * (chk->rec.data.TSN_seq)
5775 				 */
5776 				if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
5777 
5778 					SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
5779 					if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
5780 					    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
5781 						/*
5782 						 * EY These should never
5783 						 * happen; explained before
5784 						 */
5785 					} else {
5786 						SCTP_TCB_LOCK_ASSERT(stcb);
5787 						SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
5788 						SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
5789 						if (compare_with_wrap(nr_tsn,
5790 						    asoc->highest_tsn_inside_nr_map,
5791 						    MAX_TSN))
5792 							asoc->highest_tsn_inside_nr_map = nr_tsn;
5793 					}
5794 					if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, nr_gap)) {
5795 						/*
5796 						 * printf("In sctp_kick_prsctp_reorder_queue(7): "
5797 						 *     "Something wrong, the TSN to be tagged as NR "
5798 						 *     "is not even in the mapping_array, or map and "
5799 						 *     "nr_map are inconsistent");
5800 						 */
5801 						/*
5802 						 * EY - not 100% sure about the lock thing;
5803 						 * don't think it's required:
5804 						 * SCTP_TCB_LOCK_ASSERT(stcb);
5805 						 */
5806 						/*
5807 						 * printf("\nCalculating an nr_gap!!\n"
5808 						 *     "mapping_array_size = %d nr_mapping_array_size = %d\n"
5809 						 *     "mapping_array_base = %d nr_mapping_array_base = %d\n"
5810 						 *     "highest_tsn_inside_map = %d highest_tsn_inside_nr_map = %d\n"
5811 						 *     "TSN = %d nr_gap = %d", asoc->mapping_array_size,
5812 						 *     asoc->nr_mapping_array_size, asoc->mapping_array_base_tsn,
5813 						 *     asoc->nr_mapping_array_base_tsn, asoc->highest_tsn_inside_map,
5814 						 *     asoc->highest_tsn_inside_nr_map, tsn, nr_gap);
5815 						 */
5816 					}
5841 				}
5842 			}
5843 		} else {
5844 			/* no more delivery now. */
5845 			break;
5846 		}
5847 		ctl = nctl;
5848 	}
5849 	/*
5850 	 * now we must deliver things in queue the normal way, if any are
5851 	 * now ready.
5852 	 */
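	/*
	 * E.g. if the loop above force-delivered everything up to SSN 7
	 * and SSNs 8 and 9 are already sitting in order on the inqueue,
	 * they are handed to the read queue here as well.
	 */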
5853 	tt = strmin->last_sequence_delivered + 1;
5854 	ctl = TAILQ_FIRST(&strmin->inqueue);
5855 	while (ctl) {
5856 		nctl = TAILQ_NEXT(ctl, next);
5857 		if (tt == ctl->sinfo_ssn) {
5858 			/* this is deliverable now */
5859 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5860 			/* subtract pending on streams */
5861 			asoc->size_on_all_streams -= ctl->length;
5862 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5863 			/* deliver it to at least the delivery-q */
5864 			strmin->last_sequence_delivered = ctl->sinfo_ssn;
5865 			if (stcb->sctp_socket) {
5866 				/* EY */
5867 				nr_tsn = ctl->sinfo_tsn;
5868 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5869 				    ctl,
5870 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5871 				/*
5872 				 * EY - this is the chunk that should be
5873 				 * tagged NR; calculate the gap and
5874 				 * then tag this TSN as NR
5875 				 * (chk->rec.data.TSN_seq)
5876 				 */
5877 				if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
5878 					SCTP_CALC_TSN_TO_GAP(nr_gap, nr_tsn, asoc->nr_mapping_array_base_tsn);
5879 					if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
5880 					    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
5881 						/*
5882 						 * EY These should never
5883 						 * happen, explained before
5884 						 */
5885 					} else {
5886 						SCTP_TCB_LOCK_ASSERT(stcb);
5887 						SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
5888 						SCTP_REVERSE_OUT_TSN_PRES(nr_gap, nr_tsn, asoc);
5889 						if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map,
5890 						    MAX_TSN))
5891 							asoc->highest_tsn_inside_nr_map = nr_tsn;
5892 					}
5893 					if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, nr_gap)) {
5894 						/*
5895 						 * printf("In sctp_kick_prsctp_reorder_queue(8): "
5896 						 *     "Something wrong, the TSN to be tagged as NR "
5897 						 *     "is not even in the mapping_array, or map and "
5898 						 *     "nr_map are inconsistent");
5899 						 */
5900 						/*
5901 						 * EY - not 100% sure about the lock thing;
5902 						 * don't think it's required:
5903 						 * SCTP_TCB_LOCK_ASSERT(stcb);
5904 						 */
5905 						/*
5906 						 * printf("\nCalculating an nr_gap!!\n"
5907 						 *     "mapping_array_size = %d nr_mapping_array_size = %d\n"
5908 						 *     "mapping_array_base = %d nr_mapping_array_base = %d\n"
5909 						 *     "highest_tsn_inside_map = %d highest_tsn_inside_nr_map = %d\n"
5910 						 *     "TSN = %d nr_gap = %d", asoc->mapping_array_size,
5911 						 *     asoc->nr_mapping_array_size, asoc->mapping_array_base_tsn,
5912 						 *     asoc->nr_mapping_array_base_tsn, asoc->highest_tsn_inside_map,
5913 						 *     asoc->highest_tsn_inside_nr_map, tsn, nr_gap);
5914 						 */
5915 					}
5940 				}
5941 			}
5942 			tt = strmin->last_sequence_delivered + 1;
5943 		} else {
5944 			break;
5945 		}
5946 		ctl = nctl;
5947 	}
5948 }
5949 
5950 static void
5951 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5952     struct sctp_association *asoc,
5953     uint16_t stream, uint16_t seq)
5954 {
5955 	struct sctp_tmit_chunk *chk, *at;
5956 
5957 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5958 		/* For each one on here see if we need to toss it */
5959 		/*
5960 		 * For now large messages held on the reasmqueue that are
5961 		 * complete will be tossed too. We could in theory do more
5962 		 * work to spin through and stop after dumping one msg aka
5963 		 * seeing the start of a new msg at the head, and call the
5964 		 * delivery function... to see if it can be delivered... But
5965 		 * for now we just dump everything on the queue.
5966 		 */
5967 		chk = TAILQ_FIRST(&asoc->reasmqueue);
5968 		while (chk) {
5969 			at = TAILQ_NEXT(chk, sctp_next);
5970 			/*
5971 			 * Do not toss it if on a different stream or marked
5972 			 * for unordered delivery in which case the stream
5973 			 * sequence number has no meaning.
5974 			 */
5975 			if ((chk->rec.data.stream_number != stream) ||
5976 			    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5977 				chk = at;
5978 				continue;
5979 			}
5980 			if (chk->rec.data.stream_seq == seq) {
5981 				/* It needs to be tossed */
5982 				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5983 				if (compare_with_wrap(chk->rec.data.TSN_seq,
5984 				    asoc->tsn_last_delivered, MAX_TSN)) {
5985 					asoc->tsn_last_delivered =
5986 					    chk->rec.data.TSN_seq;
5987 					asoc->str_of_pdapi =
5988 					    chk->rec.data.stream_number;
5989 					asoc->ssn_of_pdapi =
5990 					    chk->rec.data.stream_seq;
5991 					asoc->fragment_flags =
5992 					    chk->rec.data.rcv_flags;
5993 				}
5994 				asoc->size_on_reasm_queue -= chk->send_size;
5995 				sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5996 
5997 				/* Clear up any stream problem */
5998 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5999 				    SCTP_DATA_UNORDERED &&
6000 				    (compare_with_wrap(chk->rec.data.stream_seq,
6001 				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
6002 				    MAX_SEQ))) {
6003 					/*
6004 					 * We must dump forward this stream's
6005 					 * sequence number if the chunk being
6006 					 * skipped is not unordered. There is
6007 					 * a chance that if the peer does not
6008 					 * include the last fragment in its
6009 					 * FWD-TSN we WILL have a problem here
6010 					 * since you would have a partial
6011 					 * chunk in queue that may not be
6012 					 * deliverable. Also if a Partial
6013 					 * Delivery API has started, the user
6014 					 * may get a partial chunk. The next
6015 					 * read returning a new chunk...
6016 					 * really ugly but I see no way
6017 					 * around it! Maybe a notify??
6019 					 */
6020 					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
6021 					    chk->rec.data.stream_seq;
6022 				}
6023 				if (chk->data) {
6024 					sctp_m_freem(chk->data);
6025 					chk->data = NULL;
6026 				}
6027 				sctp_free_a_chunk(stcb, chk);
6028 			} else if (compare_with_wrap(chk->rec.data.stream_seq, seq, MAX_SEQ)) {
6029 				/*
6030 				 * If the stream_seq is > than the purging
6031 				 * one, we are done
6032 				 */
6033 				break;
6034 			}
6035 			chk = at;
6036 		}
6037 	}
6038 }
6039 
6040 
6041 void
6042 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
6043     struct sctp_forward_tsn_chunk *fwd, int *abort_flag, struct mbuf *m, int offset)
6044 {
6045 	/*
6046 	 * ISSUES that MUST be fixed for ECN! When we are the sender of the
6047 	 * forward TSN, when the SACK comes back that acknowledges the
6048 	 * FWD-TSN we must reset the NONCE sum to match correctly. This will
6049 	 * get quite tricky since we may have sent more data intervening
6050 	 * and must carefully account for what the SACK says on the nonce
6051 	 * and any gaps that are reported. This work will NOT be done here,
6052 	 * but I note it here since it is really related to PR-SCTP and
6053 	 * FWD-TSN's
6054 	 */
6055 
6056 	/* The pr-sctp fwd tsn */
6057 	/*
6058 	 * here we will perform all the data receiver side steps for
6059 	 * processing FwdTSN, as required by the pr-sctp draft.
6060 	 * Assume we get FwdTSN(x):
6061 	 * 1) update local cumTSN to x,
6062 	 * 2) try to further advance cumTSN to x + others we have,
6063 	 * 3) examine and update re-ordering queue on pr-in-streams,
6064 	 * 4) clean up re-assembly queue,
6065 	 * 5) send a sack to report where we are.
6067 	 */
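	/*
	 * Example: if our cumulative TSN is 1000 and FwdTSN(x = 1005)
	 * arrives, TSNs 1001-1005 are marked present in the mapping
	 * array, any reassembly fragments at or below 1005 are tossed,
	 * and the affected per-stream re-ordering queues are kicked so
	 * later messages can be delivered.
	 */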
6068 	struct sctp_association *asoc;
6069 	uint32_t new_cum_tsn, gap;
6070 	unsigned int i, fwd_sz, cumack_set_flag, m_size;
6071 	uint32_t str_seq;
6072 	struct sctp_stream_in *strm;
6073 	struct sctp_tmit_chunk *chk, *at;
6074 	struct sctp_queued_to_read *ctl, *sv;
6075 
6076 	cumack_set_flag = 0;
6077 	asoc = &stcb->asoc;
6078 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
6079 		SCTPDBG(SCTP_DEBUG_INDATA1,
6080 		    "Bad size too small/big fwd-tsn\n");
6081 		return;
6082 	}
6083 	m_size = (stcb->asoc.mapping_array_size << 3);
6084 	/*************************************************************/
6085 	/* 1. Here we update local cumTSN and shift the bitmap array */
6086 	/*************************************************************/
6087 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
6088 
6089 	if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
6090 	    asoc->cumulative_tsn == new_cum_tsn) {
6091 		/* Already got there ... */
6092 		return;
6093 	}
6094 	if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
6095 	    MAX_TSN)) {
6096 		asoc->highest_tsn_inside_map = new_cum_tsn;
6097 		/* EY nr_mapping_array version of the above */
6098 		/*
6099 		 * if(SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
6100 		 * asoc->peer_supports_nr_sack)
6101 		 */
6102 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
6103 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
6104 			sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
6105 		}
6106 	}
6107 	/*
6108 	 * now we know the new TSN is more advanced, let's find the actual
6109 	 * gap
6110 	 */
6111 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
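	/*
	 * E.g. with mapping_array_base_tsn = 0xFFFFFFFD and new_cum_tsn
	 * = 4, SCTP_CALC_TSN_TO_GAP yields gap = 7; the subtraction is
	 * done in 32-bit serial arithmetic, so the wrap is harmless.
	 */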
6112 	if (gap >= m_size) {
6113 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
6114 			sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
6115 		}
6116 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
6117 			struct mbuf *oper;
6118 
6119 			/*
6120 			 * out of range (of single byte chunks in the rwnd I
6121 			 * give out). This must be an attacker.
6122 			 */
6123 			*abort_flag = 1;
6124 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
6125 			    0, M_DONTWAIT, 1, MT_DATA);
6126 			if (oper) {
6127 				struct sctp_paramhdr *ph;
6128 				uint32_t *ippp;
6129 
6130 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
6131 				    (sizeof(uint32_t) * 3);
6132 				ph = mtod(oper, struct sctp_paramhdr *);
6133 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
6134 				ph->param_length = htons(SCTP_BUF_LEN(oper));
6135 				ippp = (uint32_t *) (ph + 1);
6136 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
6137 				ippp++;
6138 				*ippp = asoc->highest_tsn_inside_map;
6139 				ippp++;
6140 				*ippp = new_cum_tsn;
6141 			}
6142 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
6143 			sctp_abort_an_association(stcb->sctp_ep, stcb,
6144 			    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
6145 			return;
6146 		}
6147 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
6148 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
6149 		cumack_set_flag = 1;
6150 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
6151 		asoc->cumulative_tsn = asoc->highest_tsn_inside_map = new_cum_tsn;
6152 		/* EY - nr_sack: nr_mapping_array version of the above */
6153 		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
6154 			memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.nr_mapping_array_size);
6155 			asoc->nr_mapping_array_base_tsn = new_cum_tsn + 1;
6156 			asoc->highest_tsn_inside_nr_map = new_cum_tsn;
6157 			if (asoc->nr_mapping_array_size != asoc->mapping_array_size) {
6158 				/*
6159 				 * printf("In sctp_handle_forward_tsn: "
6160 				 *     "something is wrong, the sizes of map "
6161 				 *     "and nr_map should be equal!");
6162 				 */
6163 			}
6164 		}
6165 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
6166 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
6167 		}
6168 		asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
6169 	} else {
6170 		SCTP_TCB_LOCK_ASSERT(stcb);
6171 		for (i = 0; i <= gap; i++) {
6172 			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack
6173 			    && SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
6174 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
6175 			} else {
6176 				SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
6177 			}
6178 		}
6179 		/*
6180 		 * Now after marking all, slide thing forward but no sack
6181 		 * please.
6182 		 */
6183 		sctp_sack_check(stcb, 0, 0, abort_flag);
6184 		if (*abort_flag)
6185 			return;
6186 	}
6187 	/*************************************************************/
6188 	/* 2. Clear up re-assembly queue                             */
6189 	/*************************************************************/
6190 	/*
6191 	 * First service it if pd-api is up, just in case we can progress it
6192 	 * forward
6193 	 */
6194 	if (asoc->fragmented_delivery_inprogress) {
6195 		sctp_service_reassembly(stcb, asoc);
6196 	}
6197 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
6198 		/* For each one on here see if we need to toss it */
6199 		/*
6200 		 * For now large messages held on the reasmqueue that are
6201 		 * complete will be tossed too. We could in theory do more
6202 		 * work to spin through and stop after dumping one msg aka
6203 		 * seeing the start of a new msg at the head, and call the
6204 		 * delivery function... to see if it can be delivered... But
6205 		 * for now we just dump everything on the queue.
6206 		 */
6207 		chk = TAILQ_FIRST(&asoc->reasmqueue);
6208 		while (chk) {
6209 			at = TAILQ_NEXT(chk, sctp_next);
6210 			if ((compare_with_wrap(new_cum_tsn,
6211 			    chk->rec.data.TSN_seq, MAX_TSN)) ||
6212 			    (new_cum_tsn == chk->rec.data.TSN_seq)) {
6213 				/* It needs to be tossed */
6214 				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
6215 				if (compare_with_wrap(chk->rec.data.TSN_seq,
6216 				    asoc->tsn_last_delivered, MAX_TSN)) {
6217 					asoc->tsn_last_delivered =
6218 					    chk->rec.data.TSN_seq;
6219 					asoc->str_of_pdapi =
6220 					    chk->rec.data.stream_number;
6221 					asoc->ssn_of_pdapi =
6222 					    chk->rec.data.stream_seq;
6223 					asoc->fragment_flags =
6224 					    chk->rec.data.rcv_flags;
6225 				}
6226 				asoc->size_on_reasm_queue -= chk->send_size;
6227 				sctp_ucount_decr(asoc->cnt_on_reasm_queue);
6228 
6229 				/* Clear up any stream problem */
6230 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
6231 				    SCTP_DATA_UNORDERED &&
6232 				    (compare_with_wrap(chk->rec.data.stream_seq,
6233 				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
6234 				    MAX_SEQ))) {
6235 					/*
6236 					 * We must dump forward this stream's
6237 					 * sequence number if the chunk being
6238 					 * skipped is not unordered. There is
6239 					 * a chance that if the peer does not
6240 					 * include the last fragment in its
6241 					 * FWD-TSN we WILL have a problem here
6242 					 * since you would have a partial
6243 					 * chunk in queue that may not be
6244 					 * deliverable. Also if a Partial
6245 					 * Delivery API has started, the user
6246 					 * may get a partial chunk. The next
6247 					 * read returning a new chunk...
6248 					 * really ugly but I see no way
6249 					 * around it! Maybe a notify??
6251 					 */
6252 					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
6253 					    chk->rec.data.stream_seq;
6254 				}
6255 				if (chk->data) {
6256 					sctp_m_freem(chk->data);
6257 					chk->data = NULL;
6258 				}
6259 				sctp_free_a_chunk(stcb, chk);
6260 			} else {
6261 				/*
6262 				 * Ok we have gone beyond the end of the
6263 				 * fwd-tsn's mark.
6264 				 */
6265 				break;
6266 			}
6267 			chk = at;
6268 		}
6269 	}
6270 	/*******************************************************/
6271 	/* 3. Update the PR-stream re-ordering queues and fix  */
6272 	/* delivery issues as needed.                       */
6273 	/*******************************************************/
6274 	fwd_sz -= sizeof(*fwd);
6275 	if (m && fwd_sz) {
6276 		/* New method. */
6277 		unsigned int num_str;
6278 		struct sctp_strseq *stseq, strseqbuf;
6279 
6280 		offset += sizeof(*fwd);
6281 
6282 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
6283 		num_str = fwd_sz / sizeof(struct sctp_strseq);
6284 		for (i = 0; i < num_str; i++) {
6285 			uint16_t st;
6286 
6287 			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
6288 			    sizeof(struct sctp_strseq),
6289 			    (uint8_t *) & strseqbuf);
6290 			offset += sizeof(struct sctp_strseq);
6291 			if (stseq == NULL) {
6292 				break;
6293 			}
6294 			/* Convert */
6295 			st = ntohs(stseq->stream);
6296 			stseq->stream = st;
6297 			st = ntohs(stseq->sequence);
6298 			stseq->sequence = st;
6299 
6300 			/* now process */
6301 
6302 			/*
6303 			 * Ok we now look for the stream/seq on the read
6304 			 * queue where it's not all delivered. If we find it,
6305 			 * we transmute the read entry into a PDI_ABORTED.
6306 			 */
6307 			if (stseq->stream >= asoc->streamincnt) {
6308 				/* screwed up streams, stop!  */
6309 				break;
6310 			}
6311 			if ((asoc->str_of_pdapi == stseq->stream) &&
6312 			    (asoc->ssn_of_pdapi == stseq->sequence)) {
6313 				/*
6314 				 * If this is the one we were partially
6315 				 * delivering now then we no longer are.
6316 				 * Note this will change with the reassembly
6317 				 * re-write.
6318 				 */
6319 				asoc->fragmented_delivery_inprogress = 0;
6320 			}
6321 			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
6322 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
6323 				if ((ctl->sinfo_stream == stseq->stream) &&
6324 				    (ctl->sinfo_ssn == stseq->sequence)) {
6325 					str_seq = (stseq->stream << 16) | stseq->sequence;
6326 					ctl->end_added = 1;
6327 					ctl->pdapi_aborted = 1;
6328 					sv = stcb->asoc.control_pdapi;
6329 					stcb->asoc.control_pdapi = ctl;
6330 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
6331 					    stcb,
6332 					    SCTP_PARTIAL_DELIVERY_ABORTED,
6333 					    (void *)&str_seq,
6334 					    SCTP_SO_NOT_LOCKED);
6335 					stcb->asoc.control_pdapi = sv;
6336 					break;
6337 				} else if ((ctl->sinfo_stream == stseq->stream) &&
6338 				    (compare_with_wrap(ctl->sinfo_ssn, stseq->sequence, MAX_SEQ))) {
6339 					/* We are past our victim SSN */
6340 					break;
6341 				}
6342 			}
6343 			strm = &asoc->strmin[stseq->stream];
6344 			if (compare_with_wrap(stseq->sequence,
6345 			    strm->last_sequence_delivered, MAX_SEQ)) {
6346 				/* Update the sequence number */
6347 				strm->last_sequence_delivered =
6348 				    stseq->sequence;
6349 			}
6350 			/* now kick the stream the new way */
6351 			/* sa_ignore NO_NULL_CHK */
6352 			sctp_kick_prsctp_reorder_queue(stcb, strm);
6353 		}
6354 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
6355 	}
6356 	if (TAILQ_FIRST(&asoc->reasmqueue)) {
6357 		/* now let's kick out and check for more fragmented delivery */
6358 		/* sa_ignore NO_NULL_CHK */
6359 		sctp_deliver_reasm_check(stcb, &stcb->asoc);
6360 	}
6361 }
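
/*
 * For reference, a sketch of the wire layout the stream/sequence walk
 * above assumes, per RFC 3758 (FORWARD-TSN): the chunk carries the new
 * cumulative TSN followed by zero or more stream/sequence pairs, each
 * read into a struct sctp_strseq:
 *
 *	struct sctp_strseq {
 *		uint16_t stream;
 *		uint16_t sequence;
 *	};
 *
 * which is why num_str is fwd_sz / sizeof(struct sctp_strseq) above.
 */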
6362 
6363 /* EY fully identical to sctp_express_handle_sack, duplicated for only naming convention */
6364 void
6365 sctp_express_handle_nr_sack(struct sctp_tcb *stcb, uint32_t cumack,
6366     uint32_t rwnd, int nonce_sum_flag, int *abort_now)
6367 {
6368 	struct sctp_nets *net;
6369 	struct sctp_association *asoc;
6370 	struct sctp_tmit_chunk *tp1, *tp2;
6371 	uint32_t old_rwnd;
6372 	int win_probe_recovery = 0;
6373 	int win_probe_recovered = 0;
6374 	int j, done_once = 0;
6375 
6376 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
6377 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
6378 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
6379 	}
6380 	SCTP_TCB_LOCK_ASSERT(stcb);
6381 #ifdef SCTP_ASOCLOG_OF_TSNS
6382 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
6383 	stcb->asoc.cumack_log_at++;
6384 	if (stcb->asoc.cumack_log_at >= SCTP_TSN_LOG_SIZE) {
6385 		stcb->asoc.cumack_log_at = 0;
6386 	}
6387 #endif
6388 	asoc = &stcb->asoc;
6389 	old_rwnd = asoc->peers_rwnd;
6390 	if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
6391 		/* old ack */
6392 		return;
6393 	} else if (asoc->last_acked_seq == cumack) {
6394 		/* Window update sack */
6395 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
6396 		    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
6397 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6398 			/* SWS sender side engages */
6399 			asoc->peers_rwnd = 0;
6400 		}
6401 		if (asoc->peers_rwnd > old_rwnd) {
6402 			goto again;
6403 		}
6404 		return;
6405 	}
6406 	/* First setup for CC stuff */
6407 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6408 		net->prev_cwnd = net->cwnd;
6409 		net->net_ack = 0;
6410 		net->net_ack2 = 0;
6411 
6412 		/*
6413 		 * CMT: Reset CUC and Fast recovery algo variables before
6414 		 * SACK processing
6415 		 */
6416 		net->new_pseudo_cumack = 0;
6417 		net->will_exit_fast_recovery = 0;
6418 	}
6419 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
6420 		uint32_t send_s;
6421 
6422 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
6423 			tp1 = TAILQ_LAST(&asoc->sent_queue,
6424 			    sctpchunk_listhead);
6425 			send_s = tp1->rec.data.TSN_seq + 1;
6426 		} else {
6427 			send_s = asoc->sending_seq;
6428 		}
6429 		if ((cumack == send_s) ||
6430 		    compare_with_wrap(cumack, send_s, MAX_TSN)) {
6431 #ifndef INVARIANTS
6432 			struct mbuf *oper;
6433 
6434 #endif
6435 #ifdef INVARIANTS
6436 			panic("Impossible sack 1");
6437 #else
6438 			*abort_now = 1;
6439 			/* XXX */
6440 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
6441 			    0, M_DONTWAIT, 1, MT_DATA);
6442 			if (oper) {
6443 				struct sctp_paramhdr *ph;
6444 				uint32_t *ippp;
6445 
6446 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
6447 				    sizeof(uint32_t);
6448 				ph = mtod(oper, struct sctp_paramhdr *);
6449 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
6450 				ph->param_length = htons(SCTP_BUF_LEN(oper));
6451 				ippp = (uint32_t *) (ph + 1);
6452 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
6453 			}
6454 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
6455 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
6456 			return;
6457 #endif
6458 		}
6459 	}
6460 	asoc->this_sack_highest_gap = cumack;
6461 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
6462 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
6463 		    stcb->asoc.overall_error_count,
6464 		    0,
6465 		    SCTP_FROM_SCTP_INDATA,
6466 		    __LINE__);
6467 	}
6468 	stcb->asoc.overall_error_count = 0;
6469 	if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
6470 		/* process the new consecutive TSN first */
6471 		tp1 = TAILQ_FIRST(&asoc->sent_queue);
6472 		while (tp1) {
6473 			tp2 = TAILQ_NEXT(tp1, sctp_next);
6474 			if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
6475 			    MAX_TSN) ||
6476 			    cumack == tp1->rec.data.TSN_seq) {
6477 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
6478 					printf("Warning, an unsent is now acked?\n");
6479 				}
6480 				/*
6481 				 * ECN Nonce: Add the nonce to the sender's
6482 				 * nonce sum
6483 				 */
6484 				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
6485 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
6486 					/*
6487 					 * If it is less than ACKED, it is
6488 					 * now no longer in flight. Higher
6489 					 * values may occur during marking
6490 					 */
6491 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
6492 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
6493 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
6494 							    tp1->whoTo->flight_size,
6495 							    tp1->book_size,
6496 							    (uintptr_t) tp1->whoTo,
6497 							    tp1->rec.data.TSN_seq);
6498 						}
6499 						sctp_flight_size_decrease(tp1);
6500 						/* sa_ignore NO_NULL_CHK */
6501 						sctp_total_flight_decrease(stcb, tp1);
6502 					}
6503 					tp1->whoTo->net_ack += tp1->send_size;
6504 					if (tp1->snd_count < 2) {
6505 						/*
6506 						 * True non-retransmitted
6507 						 * chunk
6508 						 */
6509 						tp1->whoTo->net_ack2 +=
6510 						    tp1->send_size;
6511 
6512 						/* update RTO too? */
6513 						if (tp1->do_rtt) {
6514 							tp1->whoTo->RTO =
6515 							/*
6516 							 * sa_ignore
6517 							 * NO_NULL_CHK
6518 							 */
6519 							    sctp_calculate_rto(stcb,
6520 							    asoc, tp1->whoTo,
6521 							    &tp1->sent_rcv_time,
6522 							    sctp_align_safe_nocopy);
6523 							tp1->do_rtt = 0;
6524 						}
6525 					}
6526 					/*
6527 					 * CMT: CUCv2 algorithm. From the
6528 					 * cumack'd TSNs, for each TSN being
6529 					 * acked for the first time, set the
6530 					 * following variables for the
6531 					 * corresp destination.
6532 					 * new_pseudo_cumack will trigger a
6533 					 * cwnd update.
6534 					 * find_(rtx_)pseudo_cumack will
6535 					 * trigger search for the next
6536 					 * expected (rtx-)pseudo-cumack.
6537 					 */
6538 					tp1->whoTo->new_pseudo_cumack = 1;
6539 					tp1->whoTo->find_pseudo_cumack = 1;
6540 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
6541 
6542 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
6543 						/* sa_ignore NO_NULL_CHK */
6544 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
6545 					}
6546 				}
6547 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
6548 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
6549 				}
6550 				if (tp1->rec.data.chunk_was_revoked) {
6551 					/* deflate the cwnd */
6552 					tp1->whoTo->cwnd -= tp1->book_size;
6553 					tp1->rec.data.chunk_was_revoked = 0;
6554 				}
6555 				tp1->sent = SCTP_DATAGRAM_ACKED;
6556 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
6557 				if (tp1->data) {
6558 					/* sa_ignore NO_NULL_CHK */
6559 					sctp_free_bufspace(stcb, asoc, tp1, 1);
6560 					sctp_m_freem(tp1->data);
6561 				}
6562 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
6563 					sctp_log_sack(asoc->last_acked_seq,
6564 					    cumack,
6565 					    tp1->rec.data.TSN_seq,
6566 					    0,
6567 					    0,
6568 					    SCTP_LOG_FREE_SENT);
6569 				}
6570 				tp1->data = NULL;
6571 				asoc->sent_queue_cnt--;
6572 				sctp_free_a_chunk(stcb, tp1);
6573 				tp1 = tp2;
6574 			} else {
6575 				break;
6576 			}
6577 		}
6578 
6579 	}
6580 	/* sa_ignore NO_NULL_CHK */
6581 	if (stcb->sctp_socket) {
6582 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
6583 		struct socket *so;
6584 
6585 #endif
6586 
6587 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
6588 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
6589 			/* sa_ignore NO_NULL_CHK */
6590 			sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
6591 		}
6592 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
6593 		so = SCTP_INP_SO(stcb->sctp_ep);
6594 		atomic_add_int(&stcb->asoc.refcnt, 1);
6595 		SCTP_TCB_UNLOCK(stcb);
6596 		SCTP_SOCKET_LOCK(so, 1);
6597 		SCTP_TCB_LOCK(stcb);
6598 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
6599 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
6600 			/* assoc was freed while we were unlocked */
6601 			SCTP_SOCKET_UNLOCK(so, 1);
6602 			return;
6603 		}
6604 #endif
6605 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
6606 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
6607 		SCTP_SOCKET_UNLOCK(so, 1);
6608 #endif
6609 	} else {
6610 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
6611 			sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
6612 		}
6613 	}
6614 
6615 	/* JRS - Use the congestion control given in the CC module */
6616 	if (asoc->last_acked_seq != cumack)
6617 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
6618 
6619 	asoc->last_acked_seq = cumack;
6620 
6621 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
6622 		/* nothing left in-flight */
6623 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6624 			net->flight_size = 0;
6625 			net->partial_bytes_acked = 0;
6626 		}
6627 		asoc->total_flight = 0;
6628 		asoc->total_flight_count = 0;
6629 	}
6630 	/* Fix up the a-p-a-p for future PR-SCTP sends */
6631 	if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
6632 		asoc->advanced_peer_ack_point = cumack;
6633 	}
6634 	/* ECN Nonce updates */
6635 	if (asoc->ecn_nonce_allowed) {
6636 		if (asoc->nonce_sum_check) {
6637 			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
6638 				if (asoc->nonce_wait_for_ecne == 0) {
6639 					struct sctp_tmit_chunk *lchk;
6640 
6641 					lchk = TAILQ_FIRST(&asoc->send_queue);
6642 					asoc->nonce_wait_for_ecne = 1;
6643 					if (lchk) {
6644 						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
6645 					} else {
6646 						asoc->nonce_wait_tsn = asoc->sending_seq;
6647 					}
6648 				} else {
6649 					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
6650 					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
6651 						/*
6652 						 * Misbehaving peer. We need
6653 						 * to react to this guy
6654 						 */
6655 						asoc->ecn_allowed = 0;
6656 						asoc->ecn_nonce_allowed = 0;
6657 					}
6658 				}
6659 			}
6660 		} else {
6661 			/* See if Resynchronization Possible */
6662 			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
6663 				asoc->nonce_sum_check = 1;
6664 				/*
6665 				 * now we must calculate what the base is.
6666 				 * We do this based on two things: we know
6667 				 * the totals for all the segments
6668 				 * gap-acked in the SACK (none here), and we
6669 				 * know the SACK's nonce sum, in
6670 				 * nonce_sum_flag. So we can build a truth
6671 				 * table to back-calculate the new value of
6672 				 * asoc->nonce_sum_expect_base:
6673 				 *
6674 				 * SACK-flag/Seg-Sums -> Base:
6675 				 * 0/0 -> 0, 1/0 -> 1, 0/1 -> 1, 1/1 -> 0
6676 				 */
6677 				asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
6678 			}
6679 		}
6680 	}
6681 	/* RWND update */
6682 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
6683 	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
6684 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6685 		/* SWS sender side engages */
6686 		asoc->peers_rwnd = 0;
6687 	}
6688 	if (asoc->peers_rwnd > old_rwnd) {
6689 		win_probe_recovery = 1;
6690 	}
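	/*
	 * In the loop below, j counts destinations that still have data in
	 * flight. If no timer gets started (j == 0) but the sent queue is
	 * non-empty, the flight-size audit further down rebuilds the
	 * accounting and we retry once via the again label.
	 */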
6691 	/* Now assure a timer where data is queued at */
6692 again:
6693 	j = 0;
6694 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6695 		int to_ticks;
6696 
6697 		if (win_probe_recovery && (net->window_probe)) {
6698 			win_probe_recovered = 1;
6699 			/*
6700 			 * Find first chunk that was used with window probe
6701 			 * and clear the sent
6702 			 */
6703 			/* sa_ignore FREED_MEMORY */
6704 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
6705 				if (tp1->window_probe) {
6706 					/* move back to data send queue */
6707 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
6708 					break;
6709 				}
6710 			}
6711 		}
6712 		if (net->RTO == 0) {
6713 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
6714 		} else {
6715 			to_ticks = MSEC_TO_TICKS(net->RTO);
6716 		}
6717 		if (net->flight_size) {
6718 
6719 			j++;
6720 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
6721 			    sctp_timeout_handler, &net->rxt_timer);
6722 			if (net->window_probe) {
6723 				net->window_probe = 0;
6724 			}
6725 		} else {
6726 			if (net->window_probe) {
6727 				/*
6728 				 * In window probes we must assure a timer
6729 				 * is still running there
6730 				 */
6731 				net->window_probe = 0;
6732 				(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
6733 				    sctp_timeout_handler, &net->rxt_timer);
6734 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
6735 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
6736 				    stcb, net,
6737 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
6738 			}
6739 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
6740 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
6741 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
6742 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
6743 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
6744 				}
6745 			}
6746 		}
6747 	}
6748 	if ((j == 0) &&
6749 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
6750 	    (asoc->sent_queue_retran_cnt == 0) &&
6751 	    (win_probe_recovered == 0) &&
6752 	    (done_once == 0)) {
6753 		/*
6754 		 * huh, this should not happen unless all packets are
6755 		 * PR-SCTP and marked to skip of course.
6756 		 */
6757 		if (sctp_fs_audit(asoc)) {
6758 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6759 				net->flight_size = 0;
6760 			}
6761 			asoc->total_flight = 0;
6762 			asoc->total_flight_count = 0;
6763 			asoc->sent_queue_retran_cnt = 0;
6764 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
6765 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
6766 					sctp_flight_size_increase(tp1);
6767 					sctp_total_flight_increase(stcb, tp1);
6768 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
6769 					asoc->sent_queue_retran_cnt++;
6770 				}
6771 			}
6772 		}
6773 		done_once = 1;
6774 		goto again;
6775 	}
6776 	/**********************************/
6777 	/* Now what about shutdown issues */
6778 	/**********************************/
6779 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
6780 		/* nothing left on sendqueue.. consider done */
6781 		/* clean up */
6782 		if ((asoc->stream_queue_cnt == 1) &&
6783 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
6784 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
6785 		    (asoc->locked_on_sending)
6786 		    ) {
6787 			struct sctp_stream_queue_pending *sp;
6788 
6789 			/*
6790 			 * I may be in a state where we got all across.. but
6791 			 * cannot write more due to a shutdown... we abort
6792 			 * since the user did not indicate EOR in this case.
6793 			 * The sp will be cleaned during free of the asoc.
6794 			 */
6795 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
6796 			    sctp_streamhead);
6797 			if ((sp) && (sp->length == 0)) {
6798 				/* Let cleanup code purge it */
6799 				if (sp->msg_is_complete) {
6800 					asoc->stream_queue_cnt--;
6801 				} else {
6802 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
6803 					asoc->locked_on_sending = NULL;
6804 					asoc->stream_queue_cnt--;
6805 				}
6806 			}
6807 		}
6808 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
6809 		    (asoc->stream_queue_cnt == 0)) {
6810 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
6811 				/* Need to abort here */
6812 				struct mbuf *oper;
6813 
6814 		abort_out_now:
6815 				*abort_now = 1;
6816 				/* XXX */
6817 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
6818 				    0, M_DONTWAIT, 1, MT_DATA);
6819 				if (oper) {
6820 					struct sctp_paramhdr *ph;
6821 					uint32_t *ippp;
6822 
6823 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
6824 					    sizeof(uint32_t);
6825 					ph = mtod(oper, struct sctp_paramhdr *);
6826 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
6827 					ph->param_length = htons(SCTP_BUF_LEN(oper));
6828 					ippp = (uint32_t *) (ph + 1);
6829 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
6830 				}
6831 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
6832 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
6833 			} else {
6834 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
6835 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
6836 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6837 				}
6838 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
6839 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6840 				sctp_stop_timers_for_shutdown(stcb);
6841 				sctp_send_shutdown(stcb,
6842 				    stcb->asoc.primary_destination);
6843 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
6844 				    stcb->sctp_ep, stcb, asoc->primary_destination);
6845 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
6846 				    stcb->sctp_ep, stcb, asoc->primary_destination);
6847 			}
6848 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
6849 		    (asoc->stream_queue_cnt == 0)) {
6850 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
6851 				goto abort_out_now;
6852 			}
6853 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6854 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
6855 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6856 			sctp_send_shutdown_ack(stcb,
6857 			    stcb->asoc.primary_destination);
6858 
6859 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
6860 			    stcb->sctp_ep, stcb, asoc->primary_destination);
6861 		}
6862 	}
6863 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
6864 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
6865 		    rwnd,
6866 		    stcb->asoc.peers_rwnd,
6867 		    stcb->asoc.total_flight,
6868 		    stcb->asoc.total_output_queue_size);
6869 	}
6870 }
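
/*
 * Usage sketch: the express handler above only covers the cum-ack, so a
 * caller would take it only when an NR-SACK carries no gap ack blocks,
 * no nr gap ack blocks and no duplicate TSNs (how the dispatch is done
 * elsewhere is assumed here, mirroring the non-nr express path):
 *
 *	if ((num_seg == 0) && (num_nr_seg == 0) && (num_dup == 0))
 *		sctp_express_handle_nr_sack(stcb, cum_ack, a_rwnd,
 *		    flags & SCTP_SACK_NONCE_SUM, &abort_now);
 *	else
 *		sctp_handle_nr_sack(m, offset, ch, stcb, net, &abort_now,
 *		    nr_sack_len, a_rwnd);
 */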
6871 
6872 /* EY! nr_sack version of sctp_handle_segments, nr-gapped TSNs get removed from RtxQ in this method */
6873 static void
6874 sctp_handle_nr_sack_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
6875     struct sctp_nr_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
6876     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
6877     uint32_t num_seg, uint32_t num_nr_seg, int *ecn_seg_sums)
6878 {
6879 	/************************************************/
6880 	/* process fragments and update sendqueue        */
6881 	/************************************************/
6882 	struct sctp_nr_sack *nr_sack;
6883 	struct sctp_gap_ack_block *frag, block;
6884 	struct sctp_nr_gap_ack_block *nr_frag, nr_block;
6885 	struct sctp_tmit_chunk *tp1;
6886 	uint32_t i;
6887 	int wake_him = 0;
6888 	int num_frs = 0;
6889 
6890 	uint16_t frag_strt, frag_end, primary_flag_set;
6891 	uint16_t nr_frag_strt, nr_frag_end;
6892 
6893 	uint32_t last_frag_high;
6894 	uint32_t last_nr_frag_high;
6895 
6896 	/*
6897 	 * @@@ JRI : TODO: This flag is not used anywhere .. remove?
6898 	 */
6899 	if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
6900 		primary_flag_set = 1;
6901 	} else {
6902 		primary_flag_set = 0;
6903 	}
6904 	nr_sack = &ch->nr_sack;
6905 
6906 	/*
6907 	 * EY! - I will process nr_gaps similarly, by going to this
6908 	 * position again if the All bit is set
6909 	 */
6910 	frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
6911 	    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
6912 	*offset += sizeof(block);
6913 	if (frag == NULL) {
6914 		return;
6915 	}
6916 	tp1 = NULL;
6917 	last_frag_high = 0;
6918 	for (i = 0; i < num_seg; i++) {
6919 		frag_strt = ntohs(frag->start);
6920 		frag_end = ntohs(frag->end);
6921 		/* some sanity checks on the fragment offsets */
6922 		if (frag_strt > frag_end) {
6923 			/* this one is malformed, skip */
6924 			frag++;
6925 			continue;
6926 		}
6927 		if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
6928 		    MAX_TSN))
6929 			*biggest_tsn_acked = frag_end + last_tsn;
6930 
6931 		/* mark acked dgs and find out the highest TSN being acked */
6932 		if (tp1 == NULL) {
6933 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
6934 
6935 			/* save the locations of the last frags */
6936 			last_frag_high = frag_end + last_tsn;
6937 		} else {
6938 			/*
6939 			 * now let's see if we need to reset the queue due
6940 			 * to an out-of-order SACK fragment
6941 			 */
6942 			if (compare_with_wrap(frag_strt + last_tsn,
6943 			    last_frag_high, MAX_TSN)) {
6944 				/*
6945 				 * if the new frag starts after the last TSN
6946 				 * frag covered, we are ok and this one is
6947 				 * beyond the last one
6948 				 */
6949 				;
6950 			} else {
6951 				/*
6952 				 * ok, they have reset us, so we need to
6953 				 * reset the queue; this will cause extra
6954 				 * hunting, but hey, they chose the
6955 				 * performance hit when they failed to
6956 				 * order their gaps..
6957 				 */
6958 				tp1 = TAILQ_FIRST(&asoc->sent_queue);
6959 			}
6960 			last_frag_high = frag_end + last_tsn;
6961 		}
6962 		sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
6963 		    0, &num_frs, biggest_newly_acked_tsn,
6964 		    this_sack_lowest_newack, ecn_seg_sums);
6965 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
6966 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
6967 		*offset += sizeof(block);
6968 		if (frag == NULL) {
6969 			break;
6970 		}
6971 	}
6972 
6973 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
6974 		if (num_frs)
6975 			sctp_log_fr(*biggest_tsn_acked,
6976 			    *biggest_newly_acked_tsn,
6977 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
6978 	}
6979 	nr_frag = (struct sctp_nr_gap_ack_block *)sctp_m_getptr(m, *offset,
6980 	    sizeof(struct sctp_nr_gap_ack_block), (uint8_t *) & nr_block);
6981 	*offset += sizeof(nr_block);
6982 
6985 	if (nr_frag == NULL) {
6986 		return;
6987 	}
6988 	tp1 = NULL;
6989 	last_nr_frag_high = 0;
6990 	/* Reset to beginning for the nr_sack section */
6991 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
6992 
6993 	for (i = 0; i < num_nr_seg; i++) {
6994 
6995 		nr_frag_strt = ntohs(nr_frag->start);
6996 		nr_frag_end = ntohs(nr_frag->end);
6997 
6998 		/* some sanity checks on the nr fragment offsets */
6999 		if (nr_frag_strt > nr_frag_end) {
7000 			/* this one is malformed, skip */
7001 			nr_frag++;
7002 			continue;
7003 		}
7004 		/* mark acked dgs and find out the highest TSN being acked */
7005 		if (tp1 == NULL) {
7006 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
7007 
7008 			/* save the locations of the last frags */
7009 			last_nr_frag_high = nr_frag_end + last_tsn;
7010 		} else {
7011 			/*
7012 			 * now let's see if we need to reset the queue due
7013 			 * to an out-of-order SACK fragment
7014 			 */
7015 			if (compare_with_wrap(nr_frag_strt + last_tsn,
7016 			    last_nr_frag_high, MAX_TSN)) {
7017 				/*
7018 				 * if the new frag starts after the last TSN
7019 				 * frag covered, we are ok and this one is
7020 				 * beyond the last one
7021 				 */
7022 				;
7023 			} else {
7024 				/*
7025 				 * ok, they have reset us, so we need to
7026 				 * reset the queue; this will cause extra
7027 				 * hunting, but hey, they chose the
7028 				 * performance hit when they failed to
7029 				 * order their gaps..
7030 				 */
7031 				tp1 = TAILQ_FIRST(&asoc->sent_queue);
7032 			}
7033 			last_nr_frag_high = nr_frag_end + last_tsn;
7034 		}
7035 		num_frs = 0;
7036 		wake_him = sctp_process_segment_range(stcb, &tp1, last_tsn,
7037 		    nr_frag_strt, nr_frag_end, 1,
7038 		    &num_frs, biggest_newly_acked_tsn,
7039 		    this_sack_lowest_newack, ecn_seg_sums);
7040 
7041 		nr_frag = (struct sctp_nr_gap_ack_block *)sctp_m_getptr(m, *offset,
7042 		    sizeof(struct sctp_nr_gap_ack_block),
7043 		    (uint8_t *) & nr_block);
7044 		*offset += sizeof(nr_block);
7045 		if (nr_frag == NULL) {
7046 			break;
7047 		}
7048 	}
7049 
7050 	/*
7051 	 * EY- wake up the socket if things have been removed from the sent
7052 	 * queue
7053 	 */
7054 	if ((wake_him) && (stcb->sctp_socket)) {
7055 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7056 		struct socket *so;
7057 
7058 #endif
7059 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
7060 		/*
7061 		 * if (SCTP_BASE_SYSCTL(sctp_logging_level) &
7062 		 * SCTP_WAKE_LOGGING_ENABLE) { sctp_wakeup_log(stcb,
7063 		 * cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);}
7064 		 */
7065 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7066 		so = SCTP_INP_SO(stcb->sctp_ep);
7067 		atomic_add_int(&stcb->asoc.refcnt, 1);
7068 		SCTP_TCB_UNLOCK(stcb);
7069 		SCTP_SOCKET_LOCK(so, 1);
7070 		SCTP_TCB_LOCK(stcb);
7071 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
7072 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
7073 			/* assoc was freed while we were unlocked */
7074 			SCTP_SOCKET_UNLOCK(so, 1);
7075 			return;
7076 		}
7077 #endif
7078 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
7079 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7080 		SCTP_SOCKET_UNLOCK(so, 1);
7081 #endif
7082 	}			/* else { if
7083 				 * (SCTP_BASE_SYSCTL(sctp_logging_level) &
7084 				 * SCTP_WAKE_LOGGING_ENABLE) {
7085 				 * sctp_wakeup_log(stcb, cum_ack, wake_him,
7086 				 * SCTP_NOWAKE_FROM_SACK); } } */
7087 }
7088 
7089 /* EY- nr_sack */
7090 /* Identifies the non-renegable tsns that are revoked*/
7091 static void
7092 sctp_check_for_nr_revoked(struct sctp_tcb *stcb,
7093     struct sctp_association *asoc, uint32_t cumack,
7094     u_long biggest_tsn_acked)
7095 {
7096 	struct sctp_tmit_chunk *tp1;
7097 
7098 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
7099 	while (tp1) {
7100 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
7101 		    MAX_TSN)) {
7102 			/*
7103 			 * ok this guy is either ACKED or MARKED. If it is
7104 			 * ACKED it has been previously acked but not this
7105 			 * time, i.e. revoked. If it is MARKED it was ACK'ed
7106 			 * again.
7107 			 */
7108 			if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
7109 			    MAX_TSN))
7110 				break;
7111 
7112 
7113 			if (tp1->sent == SCTP_DATAGRAM_NR_ACKED) {
7114 				/*
7115 				 * EY! a non-renegable TSN is revoked, need
7116 				 * to abort the association
7117 				 */
7118 				/*
7119 				 * EY TODO: put in the code to abort the
7120 				 * assoc.
7121 				 */
7122 				return;
7123 			} else if (tp1->sent == SCTP_DATAGRAM_NR_MARKED) {
7124 				/* it has been re-acked in this SACK */
7125 				tp1->sent = SCTP_DATAGRAM_NR_ACKED;
7126 			}
7127 		}
7128 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
7129 			break;
7130 		tp1 = TAILQ_NEXT(tp1, sctp_next);
7131 	}
7132 }
7133 
7134 /* EY! nr_sack version of sctp_handle_sack, nr_gap_ack processing should be added to this method*/
7135 void
7136 sctp_handle_nr_sack(struct mbuf *m, int offset,
7137     struct sctp_nr_sack_chunk *ch, struct sctp_tcb *stcb,
7138     struct sctp_nets *net_from, int *abort_now, int nr_sack_len, uint32_t rwnd)
7139 {
7140 	struct sctp_association *asoc;
7141 
7142 	/* EY sack */
7143 	struct sctp_nr_sack *nr_sack;
7144 	struct sctp_tmit_chunk *tp1, *tp2;
7145 	uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
7146 	         this_sack_lowest_newack;
7147 	uint32_t sav_cum_ack;
7148 
7149 	/* EY num_seg */
7150 	uint16_t num_seg, num_nr_seg, num_dup;
7151 	uint16_t wake_him = 0;
7152 	unsigned int nr_sack_length;
7153 	uint32_t send_s = 0;
7154 	long j;
7155 	int accum_moved = 0;
7156 	int will_exit_fast_recovery = 0;
7157 	uint32_t a_rwnd, old_rwnd;
7158 	int win_probe_recovery = 0;
7159 	int win_probe_recovered = 0;
7160 	struct sctp_nets *net = NULL;
7161 	int nonce_sum_flag, ecn_seg_sums = 0;
7162 	int done_once;
7163 	uint8_t reneged_all = 0;
7164 	uint8_t cmt_dac_flag;
7165 
7166 	/*
7167 	 * we take any chance we can to service our queues since we cannot
7168 	 * get awoken when the socket is read from :<
7169 	 */
7170 	/*
7171 	 * Now perform the actual SACK handling: 1) Verify that it is not an
7172 	 * old sack, if so discard. 2) If there is nothing left in the send
7173 	 * queue (cum-ack is equal to last acked) then you have a duplicate
7174 	 * too, update any rwnd change and verify no timers are running.
7175 	 * then return. 3) Process any new consecutive data i.e. cum-ack
7176 	 * moved process these first and note that it moved. 4) Process any
7177 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
7178 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
7179 	 * sync up flightsizes and things, stop all timers and also check
7180 	 * for shutdown_pending state. If so then go ahead and send off the
7181 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
7182 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
7183 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
7184 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
7185 	 * if in shutdown_recv state.
7186 	 */
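	/*
	 * A worked example of the wrap-aware comparison used throughout
	 * (illustrative): with MAX_TSN == 0xffffffff,
	 * compare_with_wrap(0x00000005, 0xfffffffa, MAX_TSN) is true, so a
	 * cum-ack that has wrapped past zero still acks TSNs sent just
	 * before the wrap.
	 */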
7187 	SCTP_TCB_LOCK_ASSERT(stcb);
7188 	nr_sack = &ch->nr_sack;
7189 	/* CMT DAC algo */
7190 	this_sack_lowest_newack = 0;
7191 	j = 0;
7192 	nr_sack_length = (unsigned int)nr_sack_len;
7193 	/* ECN Nonce */
7194 	SCTP_STAT_INCR(sctps_slowpath_sack);
7195 	nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
7196 	cum_ack = last_tsn = ntohl(nr_sack->cum_tsn_ack);
7197 #ifdef SCTP_ASOCLOG_OF_TSNS
7198 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
7199 	stcb->asoc.cumack_log_at++;
7200 	if (stcb->asoc.cumack_log_at >= SCTP_TSN_LOG_SIZE) {
7201 		stcb->asoc.cumack_log_at = 0;
7202 	}
7203 #endif
7204 	num_seg = ntohs(nr_sack->num_gap_ack_blks);
7205 	num_nr_seg = ntohs(nr_sack->num_nr_gap_ack_blks);
7206 	a_rwnd = rwnd;
7207 
7208 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
7209 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
7210 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
7211 	}
7212 	/* CMT DAC algo */
7213 	cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
7214 	num_dup = ntohs(nr_sack->num_dup_tsns);
7215 
7216 	old_rwnd = stcb->asoc.peers_rwnd;
7217 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
7218 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
7219 		    stcb->asoc.overall_error_count,
7220 		    0,
7221 		    SCTP_FROM_SCTP_INDATA,
7222 		    __LINE__);
7223 	}
7224 	stcb->asoc.overall_error_count = 0;
7225 	asoc = &stcb->asoc;
7226 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7227 		sctp_log_sack(asoc->last_acked_seq,
7228 		    cum_ack,
7229 		    0,
7230 		    num_seg,
7231 		    num_dup,
7232 		    SCTP_LOG_NEW_SACK);
7233 	}
7234 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
7235 		int off_to_dup, iii;
7236 		uint32_t *dupdata, dblock;
7237 
7238 		off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) +
7239 		    (num_nr_seg * sizeof(struct sctp_nr_gap_ack_block)) + sizeof(struct sctp_nr_sack_chunk);
7240 		if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= nr_sack_length) {
7241 			dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
7242 			    sizeof(uint32_t), (uint8_t *) & dblock);
7243 			off_to_dup += sizeof(uint32_t);
7244 			if (dupdata) {
7245 				for (iii = 0; iii < num_dup; iii++) {
7246 					sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
7247 					dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
7248 					    sizeof(uint32_t), (uint8_t *) & dblock);
7249 					if (dupdata == NULL)
7250 						break;
7251 					off_to_dup += sizeof(uint32_t);
7252 				}
7253 			}
7254 		} else {
7255 			SCTP_PRINTF("Size invalid offset to dups:%d number dups:%d nr_sack_len:%d num gaps:%d num nr_gaps:%d\n",
7256 			    off_to_dup, num_dup, nr_sack_length, num_seg, num_nr_seg);
7257 		}
7258 	}
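	/*
	 * The off_to_dup computation above assumes the nr-sack wire layout:
	 * chunk header, then num_seg gap ack blocks, then num_nr_seg nr gap
	 * ack blocks, then num_dup duplicate TSNs.
	 */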
7259 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
7260 		/* reality check */
7261 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
7262 			tp1 = TAILQ_LAST(&asoc->sent_queue,
7263 			    sctpchunk_listhead);
7264 			send_s = tp1->rec.data.TSN_seq + 1;
7265 		} else {
7266 			send_s = asoc->sending_seq;
7267 		}
7268 		if (cum_ack == send_s ||
7269 		    compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
7270 #ifndef INVARIANTS
7271 			struct mbuf *oper;
7272 
7273 #endif
7274 #ifdef INVARIANTS
7275 	hopeless_peer:
7276 			panic("Impossible sack 1");
7277 #else
7278 
7279 
7280 			/*
7281 			 * no way, we have not even sent this TSN out yet.
7282 			 * Peer is hopelessly messed up with us.
7283 			 */
7284 	hopeless_peer:
7285 			*abort_now = 1;
7286 			/* XXX */
7287 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
7288 			    0, M_DONTWAIT, 1, MT_DATA);
7289 			if (oper) {
7290 				struct sctp_paramhdr *ph;
7291 				uint32_t *ippp;
7292 
7293 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
7294 				    sizeof(uint32_t);
7295 				ph = mtod(oper, struct sctp_paramhdr *);
7296 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
7297 				ph->param_length = htons(SCTP_BUF_LEN(oper));
7298 				ippp = (uint32_t *) (ph + 1);
7299 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
7300 			}
7301 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
7302 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
7303 			return;
7304 #endif
7305 		}
7306 	}
7307 	/**********************/
7308 	/* 1) check the range */
7309 	/**********************/
7310 	if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
7311 		/* acking something behind */
7312 		return;
7313 	}
7314 	sav_cum_ack = asoc->last_acked_seq;
7315 
7316 	/* update the Rwnd of the peer */
7317 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
7318 	    TAILQ_EMPTY(&asoc->send_queue) &&
7319 	    (asoc->stream_queue_cnt == 0)
7320 	    ) {
7321 		/* nothing left on send/sent and strmq */
7322 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7323 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
7324 			    asoc->peers_rwnd, 0, 0, a_rwnd);
7325 		}
7326 		asoc->peers_rwnd = a_rwnd;
7327 		if (asoc->sent_queue_retran_cnt) {
7328 			asoc->sent_queue_retran_cnt = 0;
7329 		}
7330 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7331 			/* SWS sender side engages */
7332 			asoc->peers_rwnd = 0;
7333 		}
7334 		/* stop any timers */
7335 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7336 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7337 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
7338 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
7339 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
7340 					SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
7341 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
7342 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
7343 				}
7344 			}
7345 			net->partial_bytes_acked = 0;
7346 			net->flight_size = 0;
7347 		}
7348 		asoc->total_flight = 0;
7349 		asoc->total_flight_count = 0;
7350 		return;
7351 	}
7352 	/*
7353 	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
7354 	 * things. The total byte count acked is tracked in netAckSz AND
7355 	 * netAck2 is used to track the total bytes acked that are
7356 	 * unambiguous and were never retransmitted. We track these on a per
7357 	 * destination address basis.
7358 	 */
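	/*
	 * Concretely, in the current field names: net->net_ack accumulates
	 * all newly acked bytes per destination, while net->net_ack2 only
	 * counts bytes from chunks sent exactly once (snd_count < 2), which
	 * is why only those chunks feed the RTT estimate below.
	 */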
7359 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7360 		net->prev_cwnd = net->cwnd;
7361 		net->net_ack = 0;
7362 		net->net_ack2 = 0;
7363 
7364 		/*
7365 		 * CMT: Reset CUC and Fast recovery algo variables before
7366 		 * SACK processing
7367 		 */
7368 		net->new_pseudo_cumack = 0;
7369 		net->will_exit_fast_recovery = 0;
7370 	}
7371 	/* process the new consecutive TSN first */
7372 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
7373 	while (tp1) {
7374 		if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
7375 		    MAX_TSN) ||
7376 		    last_tsn == tp1->rec.data.TSN_seq) {
7377 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
7378 				/*
7379 				 * ECN Nonce: Add the nonce to the sender's
7380 				 * nonce sum
7381 				 */
7382 				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
7383 				accum_moved = 1;
7384 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
7385 					/*
7386 					 * If it is less than ACKED, it is
7387 					 * now no longer in flight. Higher
7388 					 * values may occur during marking
7389 					 */
7390 					if ((tp1->whoTo->dest_state &
7391 					    SCTP_ADDR_UNCONFIRMED) &&
7392 					    (tp1->snd_count < 2)) {
7393 						/*
7394 						 * If there was no retran
7395 						 * and the address is
7396 						 * un-confirmed and we sent
7397 						 * there and are now
7398 						 * sacked.. its confirmed,
7399 						 * mark it so.
7400 						 */
7401 						tp1->whoTo->dest_state &=
7402 						    ~SCTP_ADDR_UNCONFIRMED;
7403 					}
7404 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
7405 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7406 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
7407 							    tp1->whoTo->flight_size,
7408 							    tp1->book_size,
7409 							    (uintptr_t) tp1->whoTo,
7410 							    tp1->rec.data.TSN_seq);
7411 						}
7412 						sctp_flight_size_decrease(tp1);
7413 						sctp_total_flight_decrease(stcb, tp1);
7414 					}
7415 					tp1->whoTo->net_ack += tp1->send_size;
7416 
7417 					/* CMT SFR and DAC algos */
7418 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
7419 					tp1->whoTo->saw_newack = 1;
7420 
7421 					if (tp1->snd_count < 2) {
7422 						/*
7423 						 * True non-retransmitted
7424 						 * chunk
7425 						 */
7426 						tp1->whoTo->net_ack2 +=
7427 						    tp1->send_size;
7428 
7429 						/* update RTO too? */
7430 						if (tp1->do_rtt) {
7431 							tp1->whoTo->RTO =
7432 							    sctp_calculate_rto(stcb,
7433 							    asoc, tp1->whoTo,
7434 							    &tp1->sent_rcv_time,
7435 							    sctp_align_safe_nocopy);
7436 							tp1->do_rtt = 0;
7437 						}
7438 					}
7439 					/*
7440 					 * CMT: CUCv2 algorithm. From the
7441 					 * cumack'd TSNs, for each TSN being
7442 					 * acked for the first time, set the
7443 					 * following variables for the
7444 					 * corresp destination.
7445 					 * new_pseudo_cumack will trigger a
7446 					 * cwnd update.
7447 					 * find_(rtx_)pseudo_cumack will
7448 					 * trigger search for the next
7449 					 * expected (rtx-)pseudo-cumack.
7450 					 */
7451 					tp1->whoTo->new_pseudo_cumack = 1;
7452 					tp1->whoTo->find_pseudo_cumack = 1;
7453 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
7454 
7455 
7456 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7457 						sctp_log_sack(asoc->last_acked_seq,
7458 						    cum_ack,
7459 						    tp1->rec.data.TSN_seq,
7460 						    0,
7461 						    0,
7462 						    SCTP_LOG_TSN_ACKED);
7463 					}
7464 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7465 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
7466 					}
7467 				}
7468 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
7469 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
7470 #ifdef SCTP_AUDITING_ENABLED
7471 					sctp_audit_log(0xB3,
7472 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
7473 #endif
7474 				}
7475 				if (tp1->rec.data.chunk_was_revoked) {
7476 					/* deflate the cwnd */
7477 					tp1->whoTo->cwnd -= tp1->book_size;
7478 					tp1->rec.data.chunk_was_revoked = 0;
7479 				}
7480 				tp1->sent = SCTP_DATAGRAM_ACKED;
7481 			}
7482 		} else {
7483 			break;
7484 		}
7485 		tp1 = TAILQ_NEXT(tp1, sctp_next);
7486 	}
7487 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
7488 	/* always set this up to cum-ack */
7489 	asoc->this_sack_highest_gap = last_tsn;
7490 
7491 	/* Move offset up to point to gaps/dups */
7492 	offset += sizeof(struct sctp_nr_sack_chunk);
7493 	if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_nr_sack_chunk)) > nr_sack_length) {
7494 
7495 		/* skip corrupt segments */
7496 		goto skip_segments;
7497 	}
7498 	if (num_seg > 0) {
7499 
7500 		/*
7501 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
7502 		 * to be greater than the cumack. Also reset saw_newack to 0
7503 		 * for all dests.
7504 		 */
7505 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7506 			net->saw_newack = 0;
7507 			net->this_sack_highest_newack = last_tsn;
7508 		}
7509 
7510 		/*
7511 		 * thisSackHighestGap will increase while handling NEW
7512 		 * segments this_sack_highest_newack will increase while
7513 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
7514 		 * used for CMT DAC algo. saw_newack will also change.
7515 		 */
7516 
7517 		sctp_handle_nr_sack_segments(m, &offset, stcb, asoc, ch, last_tsn,
7518 		    &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
7519 		    num_seg, num_nr_seg, &ecn_seg_sums);
7520 
7521 
7522 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
7523 			/*
7524 			 * validate the biggest_tsn_acked in the gap acks if
7525 			 * strict adherence is wanted.
7526 			 */
7527 			if ((biggest_tsn_acked == send_s) ||
7528 			    (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
7529 				/*
7530 				 * peer is either confused or we are under
7531 				 * attack. We must abort.
7532 				 */
7533 				goto hopeless_peer;
7534 			}
7535 		}
7536 	}
7537 skip_segments:
7538 	/*******************************************/
7539 	/* cancel ALL T3-send timer if accum moved */
7540 	/*******************************************/
7541 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
7542 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7543 			if (net->new_pseudo_cumack)
7544 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7545 				    stcb, net,
7546 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
7547 
7548 		}
7549 	} else {
7550 		if (accum_moved) {
7551 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7552 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7553 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
7554 			}
7555 		}
7556 	}
7557 	/********************************************/
7558 	/* drop the acked chunks from the sendqueue */
7559 	/********************************************/
7560 	asoc->last_acked_seq = cum_ack;
7561 
7562 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
7563 	if (tp1 == NULL)
7564 		goto done_with_it;
7565 	do {
7566 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
7567 		    MAX_TSN)) {
7568 			break;
7569 		}
7570 		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
7571 			/* no more sent on list */
7572 			printf("Warning, tp1->sent == %d and its now acked?\n",
7573 			    tp1->sent);
7574 		}
7575 		tp2 = TAILQ_NEXT(tp1, sctp_next);
7576 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
7577 		if (tp1->pr_sctp_on) {
7578 			if (asoc->pr_sctp_cnt != 0)
7579 				asoc->pr_sctp_cnt--;
7580 		}
7581 		if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
7582 		    (asoc->total_flight > 0)) {
7583 #ifdef INVARIANTS
7584 			panic("Warning flight size is postive and should be 0");
7585 #else
7586 			SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
7587 			    asoc->total_flight);
7588 #endif
7589 			asoc->total_flight = 0;
7590 		}
7591 		if (tp1->data) {
7592 			/* sa_ignore NO_NULL_CHK */
7593 			sctp_free_bufspace(stcb, asoc, tp1, 1);
7594 			sctp_m_freem(tp1->data);
7595 			if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
7596 				asoc->sent_queue_cnt_removeable--;
7597 			}
7598 		}
7599 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7600 			sctp_log_sack(asoc->last_acked_seq,
7601 			    cum_ack,
7602 			    tp1->rec.data.TSN_seq,
7603 			    0,
7604 			    0,
7605 			    SCTP_LOG_FREE_SENT);
7606 		}
7607 		tp1->data = NULL;
7608 		asoc->sent_queue_cnt--;
7609 		sctp_free_a_chunk(stcb, tp1);
7610 		wake_him++;
7611 		tp1 = tp2;
7612 	} while (tp1 != NULL);
7613 
7614 done_with_it:
7615 	/* sa_ignore NO_NULL_CHK */
7616 	if ((wake_him) && (stcb->sctp_socket)) {
7617 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7618 		struct socket *so;
7619 
7620 #endif
7621 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
7622 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
7623 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
7624 		}
7625 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7626 		so = SCTP_INP_SO(stcb->sctp_ep);
7627 		atomic_add_int(&stcb->asoc.refcnt, 1);
7628 		SCTP_TCB_UNLOCK(stcb);
7629 		SCTP_SOCKET_LOCK(so, 1);
7630 		SCTP_TCB_LOCK(stcb);
7631 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
7632 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
7633 			/* assoc was freed while we were unlocked */
7634 			SCTP_SOCKET_UNLOCK(so, 1);
7635 			return;
7636 		}
7637 #endif
7638 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
7639 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7640 		SCTP_SOCKET_UNLOCK(so, 1);
7641 #endif
7642 	} else {
7643 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
7644 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
7645 		}
7646 	}
7647 
7648 	if (asoc->fast_retran_loss_recovery && accum_moved) {
7649 		if (compare_with_wrap(asoc->last_acked_seq,
7650 		    asoc->fast_recovery_tsn, MAX_TSN) ||
7651 		    asoc->last_acked_seq == asoc->fast_recovery_tsn) {
7652 			/* Setup so we will exit RFC2582 fast recovery */
7653 			will_exit_fast_recovery = 1;
7654 		}
7655 	}
7656 	/*
7657 	 * Check for revoked fragments:
7658 	 *
7659 	 * if the previous sack had no frags, we can't have any revoked;
7660 	 * if it had frags, then: if we now have frags (num_seg > 0),
7661 	 * call sctp_check_for_revoked() to tell if the peer revoked
7662 	 * some of them; else the peer revoked all ACKED fragments,
7663 	 * since we had some before and now we have NONE.
7664 	 */
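	/*
	 * Example: if the previous SACK gap-acked TSNs 105-107 and this one
	 * carries no gap blocks at all, 105-107 were revoked; the code below
	 * puts them back in flight and inflates the cwnd artificially so the
	 * sender is not throttled by the re-added flight size.
	 */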
7665 
7666 	if (num_seg)
7667 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
7668 
7669 	else if (asoc->saw_sack_with_frags) {
7670 		int cnt_revoked = 0;
7671 
7672 		tp1 = TAILQ_FIRST(&asoc->sent_queue);
7673 		if (tp1 != NULL) {
7674 			/* Peer revoked all dg's marked or acked */
7675 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
7676 				/*
7677 				 * EY- maybe check only if it is nr_acked;
7678 				 * nr_marked may not be possible
7679 				 */
7680 				if ((tp1->sent == SCTP_DATAGRAM_NR_ACKED) ||
7681 				    (tp1->sent == SCTP_DATAGRAM_NR_MARKED)) {
7682 					/*
7683 					 * EY! - TODO: Something previously
7684 					 * nr_gapped is reneged, abort the
7685 					 * association
7686 					 */
7687 					return;
7688 				}
7689 				if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
7690 				    (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
7691 					tp1->sent = SCTP_DATAGRAM_SENT;
7692 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7693 						sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
7694 						    tp1->whoTo->flight_size,
7695 						    tp1->book_size,
7696 						    (uintptr_t) tp1->whoTo,
7697 						    tp1->rec.data.TSN_seq);
7698 					}
7699 					sctp_flight_size_increase(tp1);
7700 					sctp_total_flight_increase(stcb, tp1);
7701 					tp1->rec.data.chunk_was_revoked = 1;
7702 					/*
7703 					 * To ensure that this increase in
7704 					 * flightsize, which is artificial,
7705 					 * does not throttle the sender, we
7706 					 * also increase the cwnd
7707 					 * artificially.
7708 					 */
7709 					tp1->whoTo->cwnd += tp1->book_size;
7710 					cnt_revoked++;
7711 				}
7712 			}
7713 			if (cnt_revoked) {
7714 				reneged_all = 1;
7715 			}
7716 		}
7717 		asoc->saw_sack_with_frags = 0;
7718 	}
7719 	if (num_seg)
7720 		asoc->saw_sack_with_frags = 1;
7721 	else
7722 		asoc->saw_sack_with_frags = 0;
7723 
7724 	/* EY! - not sure if there should be an IF */
7725 	if (num_nr_seg)
7726 		sctp_check_for_nr_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
7727 	else if (asoc->saw_sack_with_nr_frags) {
7728 		/*
7729 		 * EY!- TODO: all previously nr_gapped chunks have been
7730 		 * reneged; abort the association
7731 		 */
7732 		asoc->saw_sack_with_nr_frags = 0;
7733 	}
7734 	if (num_nr_seg)
7735 		asoc->saw_sack_with_nr_frags = 1;
7736 	else
7737 		asoc->saw_sack_with_nr_frags = 0;
7738 	/* JRS - Use the congestion control given in the CC module */
7739 	asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
7740 
7741 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
7742 		/* nothing left in-flight */
7743 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7744 			/* stop all timers */
7745 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
7746 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
7747 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
7748 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
7749 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
7750 				}
7751 			}
7752 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7753 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
7754 			net->flight_size = 0;
7755 			net->partial_bytes_acked = 0;
7756 		}
7757 		asoc->total_flight = 0;
7758 		asoc->total_flight_count = 0;
7759 	}
7760 	/**********************************/
7761 	/* Now what about shutdown issues */
7762 	/**********************************/
7763 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
7764 		/* nothing left on sendqueue.. consider done */
7765 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7766 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
7767 			    asoc->peers_rwnd, 0, 0, a_rwnd);
7768 		}
7769 		asoc->peers_rwnd = a_rwnd;
7770 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7771 			/* SWS sender side engages */
7772 			asoc->peers_rwnd = 0;
7773 		}
7774 		/* clean up */
7775 		if ((asoc->stream_queue_cnt == 1) &&
7776 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
7777 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
7778 		    (asoc->locked_on_sending)
7779 		    ) {
7780 			struct sctp_stream_queue_pending *sp;
7781 
7782 			/*
7783 			 * I may be in a state where we got all across.. but
7784 			 * cannot write more due to a shutdown... we abort
7785 			 * since the user did not indicate EOR in this case.
7786 			 */
7787 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
7788 			    sctp_streamhead);
7789 			if ((sp) && (sp->length == 0)) {
7790 				asoc->locked_on_sending = NULL;
7791 				if (sp->msg_is_complete) {
7792 					asoc->stream_queue_cnt--;
7793 				} else {
7794 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
7795 					asoc->stream_queue_cnt--;
7796 				}
7797 			}
7798 		}
7799 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
7800 		    (asoc->stream_queue_cnt == 0)) {
7801 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
7802 				/* Need to abort here */
7803 				struct mbuf *oper;
7804 
7805 		abort_out_now:
7806 				*abort_now = 1;
7807 				/* XXX */
7808 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
7809 				    0, M_DONTWAIT, 1, MT_DATA);
7810 				if (oper) {
7811 					struct sctp_paramhdr *ph;
7812 					uint32_t *ippp;
7813 
7814 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
7815 					    sizeof(uint32_t);
7816 					ph = mtod(oper, struct sctp_paramhdr *);
7817 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
7818 					ph->param_length = htons(SCTP_BUF_LEN(oper));
7819 					ippp = (uint32_t *) (ph + 1);
7820 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
7821 				}
7822 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
7823 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
7824 				return;
7825 			} else {
7826 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
7827 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
7828 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
7829 				}
7830 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
7831 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
7832 				sctp_stop_timers_for_shutdown(stcb);
7833 				sctp_send_shutdown(stcb,
7834 				    stcb->asoc.primary_destination);
7835 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
7836 				    stcb->sctp_ep, stcb, asoc->primary_destination);
7837 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
7838 				    stcb->sctp_ep, stcb, asoc->primary_destination);
7839 			}
7840 			return;
7841 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
7842 		    (asoc->stream_queue_cnt == 0)) {
7843 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
7844 				goto abort_out_now;
7845 			}
7846 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
7847 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
7848 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
7849 			sctp_send_shutdown_ack(stcb,
7850 			    stcb->asoc.primary_destination);
7851 
7852 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
7853 			    stcb->sctp_ep, stcb, asoc->primary_destination);
7854 			return;
7855 		}
7856 	}
7857 	/*
7858 	 * Now here we are going to recycle net_ack for a different use...
7859 	 * HEADS UP.
7860 	 */
7861 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7862 		net->net_ack = 0;
7863 	}
7864 
7865 	/*
7866 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
7867 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
7868 	 * automatically ensure that.
7869 	 */
7870 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
7871 		this_sack_lowest_newack = cum_ack;
7872 	}
7873 	if (num_seg > 0) {
7874 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
7875 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
7876 	}
7877 	/* JRS - Use the congestion control given in the CC module */
7878 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
7879 
7880 	/******************************************************************
7881 	 *  Here we handle ECN Nonce checking.
7882 	 *  We check whether the reported nonce sum was incorrect or
7883 	 *  whether resynchronization is needed, and if we catch a
7884 	 *  misbehaving receiver we disable ECN for it.
7885 	 ******************************************************************/
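	/*
	 * Background (assumption, modeled on the TCP ECN nonce idea of
	 * RFC 3540): each TSN carries a one-bit nonce and the peer echoes
	 * the running one-bit sum in its SACKs. A receiver concealing a
	 * loss cannot guess the nonce bits of the missing TSNs, so a sum
	 * mismatch that persists past nonce_wait_tsn exposes it.
	 */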
7886 
7887 	if (asoc->ecn_nonce_allowed) {
7888 		if (asoc->nonce_sum_check) {
7889 			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
7890 				if (asoc->nonce_wait_for_ecne == 0) {
7891 					struct sctp_tmit_chunk *lchk;
7892 
7893 					lchk = TAILQ_FIRST(&asoc->send_queue);
7894 					asoc->nonce_wait_for_ecne = 1;
7895 					if (lchk) {
7896 						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
7897 					} else {
7898 						asoc->nonce_wait_tsn = asoc->sending_seq;
7899 					}
7900 				} else {
7901 					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
7902 					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
7903 						/*
7904 						 * Misbehaving peer: the nonce
7905 						 * check failed, so disable ECN.
7906 						 */
7907 						asoc->ecn_allowed = 0;
7908 						asoc->ecn_nonce_allowed = 0;
7909 					}
7910 				}
7911 			}
7912 		} else {
7913 			/* See if Resynchronization Possible */
7914 			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
7915 				asoc->nonce_sum_check = 1;
7916 				/*
7917 				 * Now we must calculate what the base is.
7918 				 * We know the nonce totals for the segments
7919 				 * gap-acked in this SACK (in ecn_seg_sums)
7920 				 * and the SACK's own nonce sum (in
7921 				 * nonce_sum_flag), so we can back-calculate
7922 				 * the new asoc->nonce_sum_expect_base:
7923 				 *
7924 				 * SACK-flag-Value   Seg-Sums   Base
7925 				 *        0             0        0
7926 				 *        1             0        1
7927 				 *        0             1        1
7928 				 *        1             1        0
7929 				 */
7930 				asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
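				/*
				 * Worked example (hypothetical values): if
				 * the SACK carried nonce_sum_flag = 1 while
				 * ecn_seg_sums = 0, the XOR above yields a
				 * new expect base of 1, matching row two of
				 * the truth table above.
				 */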
7931 			}
7932 		}
7933 	}
7934 	/* Now, are we exiting loss recovery? */
7935 	if (will_exit_fast_recovery) {
7936 		/* Ok, we must exit fast recovery */
7937 		asoc->fast_retran_loss_recovery = 0;
7938 	}
7939 	if ((asoc->sat_t3_loss_recovery) &&
7940 	    ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
7941 	    MAX_TSN) ||
7942 	    (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
7943 		/* end satellite t3 loss recovery */
7944 		asoc->sat_t3_loss_recovery = 0;
7945 	}
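	/*
	 * Note: satellite T3 loss recovery ends only after the cumulative
	 * ack catches up with the TSN recorded when that recovery began.
	 */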
7946 	/*
7947 	 * CMT Fast recovery
7948 	 */
7949 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7950 		if (net->will_exit_fast_recovery) {
7951 			/* Ok, we must exit fast recovery */
7952 			net->fast_retran_loss_recovery = 0;
7953 		}
7954 	}
7955 
7956 	/* Adjust and set the new rwnd value */
7957 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7958 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
7959 		    asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
7960 	}
7961 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
7962 	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
7963 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7964 		/* SWS sender side engages */
7965 		asoc->peers_rwnd = 0;
7966 	}
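	/*
	 * Worked example (hypothetical numbers): a_rwnd = 10000,
	 * total_flight = 6000, sent_queue_cnt = 10 and a per-chunk
	 * overhead of 256 give peers_rwnd = 10000 - (6000 + 10 * 256) =
	 * 1440. Had that dropped below the sender-side SWS threshold, it
	 * would have been clamped to 0 as above.
	 */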
7967 	if (asoc->peers_rwnd > old_rwnd) {
7968 		win_probe_recovery = 1;
7969 	}
7970 	/*
7971 	 * Now we must set up a timer for every destination that has
7972 	 * data outstanding.
7973 	 */
7974 	done_once = 0;
7975 again:
7976 	j = 0;
7977 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7978 		if (win_probe_recovery && (net->window_probe)) {
7979 			win_probe_recovered = 1;
7980 			/*-
7981 			 * Find the first chunk that was used for a
7982 			 * window probe and clear the event. Put it
7983 			 * back into the send queue as if it had not
7984 			 * been sent.
7985 			 */
7986 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
7987 				if (tp1->window_probe) {
7988 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
7989 					break;
7990 				}
7991 			}
7992 		}
7993 		if (net->flight_size) {
7994 			j++;
7995 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
7996 			    stcb->sctp_ep, stcb, net);
7997 			if (net->window_probe) {
7998 				net->window_probe = 0;
7999 			}
8000 		} else {
8001 			if (net->window_probe) {
8002 				net->window_probe = 0;
8003 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
8004 				    stcb->sctp_ep, stcb, net);
8005 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8006 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
8007 				    stcb, net,
8008 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
8009 			}
8010 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
8011 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
8012 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
8013 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
8014 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
8015 				}
8016 			}
8017 		}
8018 	}
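	/*
	 * j counts destinations that still have data in flight. If none
	 * do, yet the sent queue is not empty and nothing is marked for
	 * retransmission, the flight accounting is audited below.
	 */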
8019 	if ((j == 0) &&
8020 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
8021 	    (asoc->sent_queue_retran_cnt == 0) &&
8022 	    (win_probe_recovered == 0) &&
8023 	    (done_once == 0)) {
8024 		/*
8025 		 * This should not happen unless all packets are
8026 		 * PR-SCTP and marked to be skipped.
8027 		 */
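		/*
		 * Recovery sketch: when the audit reports an inconsistency,
		 * zero each destination's flight_size and the association
		 * totals, then rebuild them from sent_queue, counting
		 * unresent chunks back into flight and re-counting chunks
		 * marked for retransmission.
		 */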
8028 		if (sctp_fs_audit(asoc)) {
8029 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8030 				net->flight_size = 0;
8031 			}
8032 			asoc->total_flight = 0;
8033 			asoc->total_flight_count = 0;
8034 			asoc->sent_queue_retran_cnt = 0;
8035 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
8036 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
8037 					sctp_flight_size_increase(tp1);
8038 					sctp_total_flight_increase(stcb, tp1);
8039 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
8040 					asoc->sent_queue_retran_cnt++;
8041 				}
8042 			}
8043 		}
8044 		done_once = 1;
8045 		goto again;
8046 	}
8047 	/*********************************************/
8048 	/* Here we perform PR-SCTP procedures        */
8049 	/* (section 4.2)                             */
8050 	/*********************************************/
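	/*
	 * The C1-C3 steps below implement the PR-SCTP sender-side rules
	 * for advancing the "Advanced.Peer.Ack.Point" and deciding when a
	 * FORWARD-TSN chunk must be sent (RFC 3758).
	 */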
8051 	/* C1. update advancedPeerAckPoint */
8052 	if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
8053 		asoc->advanced_peer_ack_point = cum_ack;
8054 	}
8055 	/* C2. try to further move advancedPeerAckPoint ahead */
8056 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
8057 		struct sctp_tmit_chunk *lchk;
8058 		uint32_t old_adv_peer_ack_point;
8059 
8060 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
8061 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
8062 		/* C3. See if we need to send a Fwd-TSN */
8063 		if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
8064 		    MAX_TSN)) {
8065 			/*
8066 			 * ISSUE with ECN: see the FWD-TSN processing notes
8067 			 * for problems that will occur when ECN NONCE
8068 			 * cross-checking is added to SCTP.
8069 			 */
8070 			if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
8071 			    MAX_TSN)) {
8072 				send_forward_tsn(stcb, asoc);
8073 				/*
8074 				 * ECN Nonce: Disable Nonce Sum check when
8075 				 * FWD TSN is sent and store resync tsn
8076 				 */
8077 				asoc->nonce_sum_check = 0;
8078 				asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
8079 			} else if (lchk) {
8080 				/* also fast-retransmit FORWARD-TSNs that appear lost */
8081 				lchk->rec.data.fwd_tsn_cnt++;
8082 				if (lchk->rec.data.fwd_tsn_cnt > 3) {
8083 					send_forward_tsn(stcb, asoc);
8084 					lchk->rec.data.fwd_tsn_cnt = 0;
8085 				}
8086 			}
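			/*
			 * Rationale sketch (assumption): fwd_tsn_cnt acts as
			 * a strike counter, much like fast retransmit. If
			 * several SACKs arrive while the cumulative ack still
			 * trails the advanced ack point, the earlier
			 * FORWARD-TSN was probably lost, so it is resent.
			 */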
8087 		}
8088 		if (lchk) {
8089 			/* Assure a timer is up */
8090 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
8091 			    stcb->sctp_ep, stcb, lchk->whoTo);
8092 		}
8093 	}
8094 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
8095 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
8096 		    a_rwnd,
8097 		    stcb->asoc.peers_rwnd,
8098 		    stcb->asoc.total_flight,
8099 		    stcb->asoc.total_output_queue_size);
8100 	}
8101 }
8102