xref: /freebsd/sys/netinet/sctp_indata.c (revision d8b878873e7aa8df1972cc6a642804b17eb61087)
/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $	 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>

/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it) .. for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-to-many socket,
	 * since sb_cc is the count that everyone has put up. When we
	 * re-write sctp_soreceive we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * take out what has NOT yet been put on the socket queue and which
	 * we still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);

	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1
	 * even if it would otherwise be 0, so SWS avoidance stays engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
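
/*
 * Worked example (illustrative only, not part of the original source; all
 * numbers are hypothetical): with 64000 bytes of space reported by
 * sctp_sbspace(), 1000 bytes sitting on the reassembly queue and 3000
 * bytes queued on the streams, sctp_calc_rwnd() would advertise
 * 64000 - 1000 - 3000 = 60000 bytes, less my_rwnd_control_len for control
 * overhead. If that subtraction drags the result below
 * my_rwnd_control_len, the window is clamped to 1 byte rather than 0 so
 * the peer keeps probing (silly window syndrome avoidance).
 */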


/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


/*
 * Build out our readq entry based on the incoming chunk.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}

	ret = sctp_get_mbuf_for_msg(len,
	    0, M_DONTWAIT, 1, MT_DATA);

	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		memcpy(outinfo, sinfo, len);
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	SCTP_BUF_LEN(ret) = cmh->cmsg_len;
	return (ret);
}
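
/*
 * Illustrative layout note (not part of the original source): the mbuf
 * returned above carries a single control message, e.g. for the
 * non-extended case:
 *
 *   struct cmsghdr { cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo)),
 *                    cmsg_level = IPPROTO_SCTP, cmsg_type = SCTP_SNDRCV }
 *   followed by a struct sctp_sndrcvinfo in CMSG_DATA(cmh).
 *
 * sctp_build_ctl_cchunk() below produces the same layout in a malloc'd
 * buffer instead of an mbuf.
 */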


char *
sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
    int *control_len,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	char *buf;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}
	SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
	if (buf == NULL) {
		/* No space */
		return (buf);
	}
	/* We need a CMSG header followed by the struct */
	cmh = (struct cmsghdr *)buf;
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		memcpy(outinfo, sinfo, len);
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	*control_len = len;
	return (buf);
}

static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i;
	int fnd = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		printf("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
		panic("Things are really messed up now!!");
#endif
	}
	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; (compare_with_wrap(i, asoc->mapping_array_base_tsn, MAX_TSN) ||
		    (i == asoc->mapping_array_base_tsn)); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
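
/*
 * Worked example (illustrative only, not part of the original source;
 * numbers are hypothetical): with mapping_array_base_tsn = 1000 and
 * tsn = 1003, SCTP_CALC_TSN_TO_GAP() yields gap = 3. The bit for gap 3 is
 * then set in nr_mapping_array and cleared in mapping_array, i.e. the TSN
 * is moved to the "non-renegable" map so a later buffer drain cannot
 * revoke it. If 1003 was the highest TSN in the ordinary map, the loop
 * above walks back down (1002, 1001, ...) to find the new
 * highest_tsn_inside_map.
 */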


/*
 * We are delivering currently from the reassembly queue. We must continue to
 * deliver until we either: 1) run out of space, 2) run out of sequential
 * TSN's, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;

	struct sctp_queued_to_read *control, *ctl, *ctlat;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket above is long gone or going.. */
abandon:
		asoc->fragmented_delivery_inprogress = 0;
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		while (chk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk);
			/* sa_ignore FREED_MEMORY */
			chk = TAILQ_FIRST(&asoc->reasmqueue);
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	do {
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		if (chk == NULL) {
			return;
		}
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next ordered sequence to deliver in its
			 * stream, and not an unordered chunk, so we must
			 * wait.
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end,
			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong, either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is an EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was an EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");

#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* pull it off, we delivered it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now let's see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			ctl = TAILQ_FIRST(&strm->inqueue);
			if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
				while (ctl != NULL) {
					/* Deliver more if we can. */
					if (nxt_todel == ctl->sinfo_ssn) {
						ctlat = TAILQ_NEXT(ctl, next);
						TAILQ_REMOVE(&strm->inqueue, ctl, next);
						asoc->size_on_all_streams -= ctl->length;
						sctp_ucount_decr(asoc->cnt_on_all_streams);
						strm->last_sequence_delivered++;
						sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    ctl,
						    &stcb->sctp_socket->so_rcv, 1,
						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
						ctl = ctlat;
					} else {
						break;
					}
					nxt_todel = strm->last_sequence_delivered + 1;
				}
			}
			break;
		}
		/* sa_ignore FREED_MEMORY */
		chk = TAILQ_FIRST(&asoc->reasmqueue);
	} while (chk);
}
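
/*
 * Illustrative trace (not part of the original source; numbers are
 * hypothetical): suppose tsn_last_delivered = 99 and the reassembly queue
 * holds TSNs 100 (FIRST), 101 (MIDDLE) and 102 (LAST) of one ordered
 * message. The loop above builds a readq entry from TSN 100, appends 101
 * and 102 via sctp_append_to_readq(), and on the LAST fragment clears
 * fragmented_delivery_inprogress, bumps the stream's
 * last_sequence_delivered, and then drains any now-deliverable messages
 * queued behind it on the same stream.
 */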

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains: what do we do when the socket buffer is FULL?
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 *
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *oper;

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream,
	    (uint32_t) strm->last_sequence_delivered,
	    (uint32_t) nxt_todel);
	if (compare_with_wrap(strm->last_sequence_delivered,
	    control->sinfo_ssn, MAX_SEQ) ||
	    (strm->last_sequence_delivered == control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (sizeof(uint32_t) * 3);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
			ippp++;
			*ippp = control->sinfo_tsn;
			ippp++;
			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

		*abort_flag = 1;
		return;

	}
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it won't be queued if it could be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;

		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
		control = TAILQ_FIRST(&strm->inqueue);
		while (control != NULL) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				at = TAILQ_NEXT(control, next);
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we can always hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_NOT_LOCKED);
				control = at;
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if ((compare_with_wrap(asoc->cumulative_tsn,
		    control->sinfo_tsn, MAX_TSN)) ||
		    (control->sinfo_tsn == asoc->cumulative_tsn)) {
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (compare_with_wrap(at->sinfo_ssn,
				    control->sinfo_ssn, MAX_SEQ)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, he sent me a duplicate stream
					 * seq number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSN's have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh, for now just blow
					 * away the chunk!
					 */

					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom)
						sctp_free_remote_addr(control->whoFrom);
					control->whoFrom = NULL;
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}
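
/*
 * Illustrative example (not from the original source; numbers are
 * hypothetical): with last_sequence_delivered = 5 and SSNs 7 and 8 already
 * parked on the stream's inqueue, an arriving SSN 6 is handed straight to
 * the socket buffer, after which the while loop above drains 7 and 8 as
 * well. An arriving SSN 9 would instead be inserted into the inqueue in
 * SSN order and wait for 6, 7 and 8.
 */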

/*
 * Returns two things: the total size of the deliverable parts of the first
 * fragmented message on the reassembly queue (via *t_size), and 1 if all of
 * the message is ready or 0 if the message is still incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	while (chk) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
		chk = TAILQ_NEXT(chk, sctp_next);
	}
	return (0);
}
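
/*
 * Illustrative example (not part of the original source): if the queue
 * holds TSNs 50 (FIRST), 51 and 52 (MIDDLE) with the LAST fragment still
 * missing, sctp_is_all_msg_on_reasm() returns 0 with *t_size set to the
 * sum of the three send_sizes; once 53 (LAST) arrives it returns 1. A gap
 * in the TSN sequence stops the walk at the first non-consecutive chunk
 * and returns 0 immediately.
 */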

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize, pd_point;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep the first one is here and it's ok to deliver,
			 * but should we?
			 */
			if (stcb->sctp_socket) {
				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
				    stcb->sctp_ep->partial_delivery_point);
			} else {
				pd_point = stcb->sctp_ep->partial_delivery_point;
			}
			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
				/*
				 * Yes, we set up to start reception by
				 * backing down the TSN, just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery.. but it won't know to go
		 * back and call itself again... we do that here with the
		 * goto doit_again
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Finished our fragmented delivery; could more be
			 * waiting?
			 */
			goto doit_again;
		}
	}
}
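
/*
 * Illustrative note (not part of the original source; numbers are
 * hypothetical): with a 64000-byte receive buffer and
 * partial_delivery_point = 4096, pd_point = min(64000, 4096) = 4096. A
 * fragmented message therefore starts partial delivery either when it is
 * complete on the queue or when at least 4096 contiguous bytes of it have
 * arrived.
 */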

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so, pull it off (or as much as
 * we can). If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
	u_char last_flags;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery in progress,
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);

				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (sizeof(uint32_t) * 3);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment NOT a FIRST
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (3 * sizeof(uint32_t));
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
					    chk->rec.data.stream_number,
					    asoc->str_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (sizeof(uint32_t) * 3);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
				    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
					    chk->rec.data.stream_seq,
					    asoc->ssn_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(at->rec.data.TSN_seq,
		    chk->rec.data.TSN_seq, MAX_TSN)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, he sent me a duplicate TSN */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSN's have wrapped. Maybe I should
			 * compare to TSN somehow... sigh, for now just blow
			 * away the chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk);
			return;
		} else {
			last_flags = at->rec.data.rcv_flags;
			last_tsn = at->rec.data.TSN_seq;
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    prev->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct SSN here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    prev->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a LAST fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG ||
			    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    next->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct SSN here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    next->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is not
 * stuffing us full of bad packet fragments. A broken peer could also do
 * this, but that is doubtful. It is too bad I must worry about evil
 * crackers, sigh :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(TSN_seq,
		    at->rec.data.TSN_seq, MAX_TSN)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* Yep. It had better be a LAST then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok this guy belongs next to a guy
					 * that is NOT last, it should be a
					 * middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since it's a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than the new chunk, but does
			 * it need to be right before it?
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, it had better be a FIRST */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}
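
/*
 * Illustrative example (not part of the original source): if the
 * reassembly queue holds TSN 120 flagged MIDDLE and a new chunk arrives
 * with TSN 121, then 121 sits directly after a non-LAST fragment and so
 * must itself be part of that fragmented message; the function above
 * returns 1 and the caller can treat a supposedly self-contained chunk as
 * a protocol violation. If TSN 120 were flagged LAST instead, the new
 * chunk would be accepted (return 0).
 */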
1439 
1440 
1441 static int
1442 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1443     struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1444     struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1445     int *break_flag, int last_chunk)
1446 {
1447 	/* Process a data chunk */
1448 	/* struct sctp_tmit_chunk *chk; */
1449 	struct sctp_tmit_chunk *chk;
1450 	uint32_t tsn, gap;
1451 	struct mbuf *dmbuf;
1452 	int indx, the_len;
1453 	int need_reasm_check = 0;
1454 	uint16_t strmno, strmseq;
1455 	struct mbuf *oper;
1456 	struct sctp_queued_to_read *control;
1457 	int ordered;
1458 	uint32_t protocol_id;
1459 	uint8_t chunk_flags;
1460 	struct sctp_stream_reset_list *liste;
1461 
1462 	chk = NULL;
1463 	tsn = ntohl(ch->dp.tsn);
1464 	chunk_flags = ch->ch.chunk_flags;
1465 	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1466 		asoc->send_sack = 1;
1467 	}
1468 	protocol_id = ch->dp.protocol_id;
1469 	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1470 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1471 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1472 	}
1473 	if (stcb == NULL) {
1474 		return (0);
1475 	}
1476 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1477 	if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
1478 	    asoc->cumulative_tsn == tsn) {
1479 		/* It is a duplicate */
1480 		SCTP_STAT_INCR(sctps_recvdupdata);
1481 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1482 			/* Record a dup for the next outbound sack */
1483 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1484 			asoc->numduptsns++;
1485 		}
1486 		asoc->send_sack = 1;
1487 		return (0);
1488 	}
1489 	/* Calculate the number of TSN's between the base and this TSN */
1490 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1491 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1492 		/* Can't hold the bit in the mapping at max array, toss it */
1493 		return (0);
1494 	}
1495 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1496 		SCTP_TCB_LOCK_ASSERT(stcb);
1497 		if (sctp_expand_mapping_array(asoc, gap)) {
1498 			/* Can't expand, drop it */
1499 			return (0);
1500 		}
1501 	}
1502 	if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
1503 		*high_tsn = tsn;
1504 	}
1505 	/* See if we have received this one already */
1506 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1507 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1508 		SCTP_STAT_INCR(sctps_recvdupdata);
1509 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1510 			/* Record a dup for the next outbound sack */
1511 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1512 			asoc->numduptsns++;
1513 		}
1514 		asoc->send_sack = 1;
1515 		return (0);
1516 	}
1517 	/*
1518 	 * Check to see about the GONE flag, duplicates would cause a sack
1519 	 * to be sent up above
1520 	 */
1521 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1522 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1523 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1524 	    ) {
1525 		/*
1526 		 * wait a minute, this guy is gone, there is no longer a
1527 		 * receiver. Send peer an ABORT!
1528 		 */
1529 		struct mbuf *op_err;
1530 
1531 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1532 		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
1533 		*abort_flag = 1;
1534 		return (0);
1535 	}
1536 	/*
1537 	 * Now before going further we see if there is room. If NOT then we
1538 	 * MAY let one through only IF this TSN is the one we are waiting
1539 	 * for on a partial delivery API.
1540 	 */
1541 
1542 	/* now do the tests */
1543 	if (((asoc->cnt_on_all_streams +
1544 	    asoc->cnt_on_reasm_queue +
1545 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1546 	    (((int)asoc->my_rwnd) <= 0)) {
1547 		/*
1548 		 * When we have NO room in the rwnd we check to make sure
1549 		 * the reader is doing its job...
1550 		 */
1551 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1552 			/* some to read, wake-up */
1553 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1554 			struct socket *so;
1555 
1556 			so = SCTP_INP_SO(stcb->sctp_ep);
1557 			atomic_add_int(&stcb->asoc.refcnt, 1);
1558 			SCTP_TCB_UNLOCK(stcb);
1559 			SCTP_SOCKET_LOCK(so, 1);
1560 			SCTP_TCB_LOCK(stcb);
1561 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1562 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1563 				/* assoc was freed while we were unlocked */
1564 				SCTP_SOCKET_UNLOCK(so, 1);
1565 				return (0);
1566 			}
1567 #endif
1568 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1569 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1570 			SCTP_SOCKET_UNLOCK(so, 1);
1571 #endif
1572 		}
1573 		/* now is it in the mapping array of what we have accepted? */
1574 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN) &&
1575 		    compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1576 			/* Nope, not in the valid range; dump it */
1577 			sctp_set_rwnd(stcb, asoc);
1578 			if ((asoc->cnt_on_all_streams +
1579 			    asoc->cnt_on_reasm_queue +
1580 			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1581 				SCTP_STAT_INCR(sctps_datadropchklmt);
1582 			} else {
1583 				SCTP_STAT_INCR(sctps_datadroprwnd);
1584 			}
1585 			indx = *break_flag;
1586 			*break_flag = 1;
1587 			return (0);
1588 		}
1589 	}
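	/*
	 * Pull out the stream id and make sure the peer is not sending
	 * on a stream we never negotiated for inbound.
	 */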
1590 	strmno = ntohs(ch->dp.stream_id);
1591 	if (strmno >= asoc->streamincnt) {
1592 		struct sctp_paramhdr *phdr;
1593 		struct mbuf *mb;
1594 
1595 		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1596 		    0, M_DONTWAIT, 1, MT_DATA);
1597 		if (mb != NULL) {
1598 			/* add some space up front so prepend will work well */
1599 			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1600 			phdr = mtod(mb, struct sctp_paramhdr *);
1601 			/*
1602 			 * Error causes are just params, and this one has
1603 			 * two back-to-back phdrs: one with the error type
1604 			 * and size, the other with the stream id and a rsvd
1605 			 */
1606 			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1607 			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1608 			phdr->param_length =
1609 			    htons(sizeof(struct sctp_paramhdr) * 2);
1610 			phdr++;
1611 			/* We insert the stream in the type field */
1612 			phdr->param_type = ch->dp.stream_id;
1613 			/* And set the length to 0 for the rsvd field */
1614 			phdr->param_length = 0;
1615 			sctp_queue_op_err(stcb, mb);
1616 		}
1617 		SCTP_STAT_INCR(sctps_badsid);
1618 		SCTP_TCB_LOCK_ASSERT(stcb);
1619 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1620 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1621 			asoc->highest_tsn_inside_nr_map = tsn;
1622 		}
1623 		if (tsn == (asoc->cumulative_tsn + 1)) {
1624 			/* Update cum-ack */
1625 			asoc->cumulative_tsn = tsn;
1626 		}
1627 		return (0);
1628 	}
1629 	/*
1630 	 * Before we continue, let's validate that we are not being fooled
1631 	 * by an evil attacker. We can only have 4k chunks, based on the
1632 	 * TSN spread allowed by the mapping array (512 * 8 bits), so there
1633 	 * is no way our stream sequence numbers could have wrapped. Of
1634 	 * course we only validate the FIRST fragment, so the bit must be set.
1635 	 */
1636 	strmseq = ntohs(ch->dp.stream_sequence);
1637 #ifdef SCTP_ASOCLOG_OF_TSNS
1638 	SCTP_TCB_LOCK_ASSERT(stcb);
1639 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1640 		asoc->tsn_in_at = 0;
1641 		asoc->tsn_in_wrapped = 1;
1642 	}
1643 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1644 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1645 	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1646 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1647 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1648 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1649 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1650 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1651 	asoc->tsn_in_at++;
1652 #endif
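	/*
	 * An ordered FIRST fragment whose stream sequence number is at or
	 * behind what we have already delivered on that stream (with no
	 * stream reset pending) means the peer re-used an SSN: abort.
	 */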
1653 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1654 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1655 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1656 	    (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
1657 	    strmseq, MAX_SEQ) ||
1658 	    asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
1659 		/* The incoming sseq is behind where we last delivered? */
1660 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1661 		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1662 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1663 		    0, M_DONTWAIT, 1, MT_DATA);
1664 		if (oper) {
1665 			struct sctp_paramhdr *ph;
1666 			uint32_t *ippp;
1667 
1668 			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1669 			    (3 * sizeof(uint32_t));
1670 			ph = mtod(oper, struct sctp_paramhdr *);
1671 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1672 			ph->param_length = htons(SCTP_BUF_LEN(oper));
1673 			ippp = (uint32_t *) (ph + 1);
1674 			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1675 			ippp++;
1676 			*ippp = tsn;
1677 			ippp++;
1678 			*ippp = ((strmno << 16) | strmseq);
1679 
1680 		}
1681 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1682 		sctp_abort_an_association(stcb->sctp_ep, stcb,
1683 		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1684 		*abort_flag = 1;
1685 		return (0);
1686 	}
1687 	/************************************
1688 	 * From here down we may find ch-> invalid
1689 	 * so it's a good idea NOT to use it.
1690 	 *************************************/
1691 
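	/* the_len is the user payload: the chunk length minus the DATA chunk header */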
1692 	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1693 	if (last_chunk == 0) {
1694 		dmbuf = SCTP_M_COPYM(*m,
1695 		    (offset + sizeof(struct sctp_data_chunk)),
1696 		    the_len, M_DONTWAIT);
1697 #ifdef SCTP_MBUF_LOGGING
1698 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1699 			struct mbuf *mat;
1700 
1701 			mat = dmbuf;
1702 			while (mat) {
1703 				if (SCTP_BUF_IS_EXTENDED(mat)) {
1704 					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1705 				}
1706 				mat = SCTP_BUF_NEXT(mat);
1707 			}
1708 		}
1709 #endif
1710 	} else {
1711 		/* We can steal the last chunk */
1712 		int l_len;
1713 
1714 		dmbuf = *m;
1715 		/* lop off the top part */
1716 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1717 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1718 			l_len = SCTP_BUF_LEN(dmbuf);
1719 		} else {
1720 			/*
1721 			 * need to count up the size; hopefully we do not
1722 			 * hit this too often :-0
1723 			 */
1724 			struct mbuf *lat;
1725 
1726 			l_len = 0;
1727 			lat = dmbuf;
1728 			while (lat) {
1729 				l_len += SCTP_BUF_LEN(lat);
1730 				lat = SCTP_BUF_NEXT(lat);
1731 			}
1732 		}
1733 		if (l_len > the_len) {
1734 			/* Trim the extra bytes off the end too */
1735 			m_adj(dmbuf, -(l_len - the_len));
1736 		}
1737 	}
1738 	if (dmbuf == NULL) {
1739 		SCTP_STAT_INCR(sctps_nomem);
1740 		return (0);
1741 	}
1742 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1743 	    asoc->fragmented_delivery_inprogress == 0 &&
1744 	    TAILQ_EMPTY(&asoc->resetHead) &&
1745 	    ((ordered == 0) ||
1746 	    ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1747 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1748 		/* Candidate for express delivery */
1749 		/*
1750 		 * It's not fragmented, no PD-API is up, nothing is in the
1751 		 * delivery queue, it's un-ordered OR ordered and the next to
1752 		 * deliver AND nothing else is stuck on the stream queue,
1753 		 * and there is room for it in the socket buffer. Let's just
1754 		 * stuff it up the buffer....
1755 		 */
1756 
1757 		/* It would be nice to avoid this copy if we could :< */
1758 		sctp_alloc_a_readq(stcb, control);
1759 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1760 		    protocol_id,
1761 		    stcb->asoc.context,
1762 		    strmno, strmseq,
1763 		    chunk_flags,
1764 		    dmbuf);
1765 		if (control == NULL) {
1766 			goto failed_express_del;
1767 		}
1768 		sctp_add_to_readq(stcb->sctp_ep, stcb,
1769 		    control, &stcb->sctp_socket->so_rcv,
1770 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1771 
1772 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1773 			/* for ordered, bump what we delivered */
1774 			asoc->strmin[strmno].last_sequence_delivered++;
1775 		}
1776 		SCTP_STAT_INCR(sctps_recvexpress);
1777 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1778 			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1779 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1780 		}
1781 		control = NULL;
1782 
1783 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1784 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1785 			asoc->highest_tsn_inside_nr_map = tsn;
1786 		}
1787 		goto finish_express_del;
1788 	}
1789 failed_express_del:
1790 	/* If we reach here this is a new chunk */
1791 	chk = NULL;
1792 	control = NULL;
1793 	/* Express for fragmented delivery? */
1794 	if ((asoc->fragmented_delivery_inprogress) &&
1795 	    (stcb->asoc.control_pdapi) &&
1796 	    (asoc->str_of_pdapi == strmno) &&
1797 	    (asoc->ssn_of_pdapi == strmseq)
1798 	    ) {
1799 		control = stcb->asoc.control_pdapi;
1800 		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1801 			/* Can't be another first? */
1802 			goto failed_pdapi_express_del;
1803 		}
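		/* Only append if this TSN directly extends the message being delivered */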
1804 		if (tsn == (control->sinfo_tsn + 1)) {
1805 			/* Yep, we can add it on */
1806 			int end = 0;
1807 			uint32_t cumack;
1808 
1809 			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1810 				end = 1;
1811 			}
1812 			cumack = asoc->cumulative_tsn;
1813 			if ((cumack + 1) == tsn)
1814 				cumack = tsn;
1815 
1816 			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1817 			    tsn,
1818 			    &stcb->sctp_socket->so_rcv)) {
1819 				SCTP_PRINTF("Append fails end:%d\n", end);
1820 				goto failed_pdapi_express_del;
1821 			}
1822 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1823 			if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1824 				asoc->highest_tsn_inside_nr_map = tsn;
1825 			}
1826 			SCTP_STAT_INCR(sctps_recvexpressm);
1827 			control->sinfo_tsn = tsn;
1828 			asoc->tsn_last_delivered = tsn;
1829 			asoc->fragment_flags = chunk_flags;
1830 			asoc->tsn_of_pdapi_last_delivered = tsn;
1831 			asoc->last_flags_delivered = chunk_flags;
1832 			asoc->last_strm_seq_delivered = strmseq;
1833 			asoc->last_strm_no_delivered = strmno;
1834 			if (end) {
1835 				/* clean up the flags and such */
1836 				asoc->fragmented_delivery_inprogress = 0;
1837 				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1838 					asoc->strmin[strmno].last_sequence_delivered++;
1839 				}
1840 				stcb->asoc.control_pdapi = NULL;
1841 				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1842 					/*
1843 					 * There could be another message
1844 					 * ready
1845 					 */
1846 					need_reasm_check = 1;
1847 				}
1848 			}
1849 			control = NULL;
1850 			goto finish_express_del;
1851 		}
1852 	}
1853 failed_pdapi_express_del:
1854 	control = NULL;
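	/*
	 * Record the TSN in the proper map: with sctp_do_drain off the TSN
	 * can never be renegged, so it goes in the nr (non-renegable) map;
	 * otherwise it stays in the regular map and may later be revoked.
	 */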
1855 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1856 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1857 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1858 			asoc->highest_tsn_inside_nr_map = tsn;
1859 		}
1860 	} else {
1861 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1862 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1863 			asoc->highest_tsn_inside_map = tsn;
1864 		}
1865 	}
1866 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1867 		sctp_alloc_a_chunk(stcb, chk);
1868 		if (chk == NULL) {
1869 			/* No memory so we drop the chunk */
1870 			SCTP_STAT_INCR(sctps_nomem);
1871 			if (last_chunk == 0) {
1872 				/* we copied it, free the copy */
1873 				sctp_m_freem(dmbuf);
1874 			}
1875 			return (0);
1876 		}
1877 		chk->rec.data.TSN_seq = tsn;
1878 		chk->no_fr_allowed = 0;
1879 		chk->rec.data.stream_seq = strmseq;
1880 		chk->rec.data.stream_number = strmno;
1881 		chk->rec.data.payloadtype = protocol_id;
1882 		chk->rec.data.context = stcb->asoc.context;
1883 		chk->rec.data.doing_fast_retransmit = 0;
1884 		chk->rec.data.rcv_flags = chunk_flags;
1885 		chk->asoc = asoc;
1886 		chk->send_size = the_len;
1887 		chk->whoTo = net;
1888 		atomic_add_int(&net->ref_count, 1);
1889 		chk->data = dmbuf;
1890 	} else {
1891 		sctp_alloc_a_readq(stcb, control);
1892 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1893 		    protocol_id,
1894 		    stcb->asoc.context,
1895 		    strmno, strmseq,
1896 		    chunk_flags,
1897 		    dmbuf);
1898 		if (control == NULL) {
1899 			/* No memory so we drop the chunk */
1900 			SCTP_STAT_INCR(sctps_nomem);
1901 			if (last_chunk == 0) {
1902 				/* we copied it, free the copy */
1903 				sctp_m_freem(dmbuf);
1904 			}
1905 			return (0);
1906 		}
1907 		control->length = the_len;
1908 	}
1909 
1910 	/* Mark it as received */
1911 	/* Now queue it where it belongs */
1912 	if (control != NULL) {
1913 		/* First a sanity check */
1914 		if (asoc->fragmented_delivery_inprogress) {
1915 			/*
1916 			 * Ok, we have a fragmented delivery in progress; if
1917 			 * this chunk is next to deliver OR belongs, in our
1918 			 * view, on the reassembly queue, the peer is evil
1919 			 * or broken.
1920 			 */
1921 			uint32_t estimate_tsn;
1922 
1923 			estimate_tsn = asoc->tsn_last_delivered + 1;
1924 			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1925 			    (estimate_tsn == control->sinfo_tsn)) {
1926 				/* Evil/Broke peer */
1927 				sctp_m_freem(control->data);
1928 				control->data = NULL;
1929 				if (control->whoFrom) {
1930 					sctp_free_remote_addr(control->whoFrom);
1931 					control->whoFrom = NULL;
1932 				}
1933 				sctp_free_a_readq(stcb, control);
1934 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1935 				    0, M_DONTWAIT, 1, MT_DATA);
1936 				if (oper) {
1937 					struct sctp_paramhdr *ph;
1938 					uint32_t *ippp;
1939 
1940 					SCTP_BUF_LEN(oper) =
1941 					    sizeof(struct sctp_paramhdr) +
1942 					    (3 * sizeof(uint32_t));
1943 					ph = mtod(oper, struct sctp_paramhdr *);
1944 					ph->param_type =
1945 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1946 					ph->param_length = htons(SCTP_BUF_LEN(oper));
1947 					ippp = (uint32_t *) (ph + 1);
1948 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1949 					ippp++;
1950 					*ippp = tsn;
1951 					ippp++;
1952 					*ippp = ((strmno << 16) | strmseq);
1953 				}
1954 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1955 				sctp_abort_an_association(stcb->sctp_ep, stcb,
1956 				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1957 
1958 				*abort_flag = 1;
1959 				return (0);
1960 			} else {
1961 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1962 					sctp_m_freem(control->data);
1963 					control->data = NULL;
1964 					if (control->whoFrom) {
1965 						sctp_free_remote_addr(control->whoFrom);
1966 						control->whoFrom = NULL;
1967 					}
1968 					sctp_free_a_readq(stcb, control);
1969 
1970 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1971 					    0, M_DONTWAIT, 1, MT_DATA);
1972 					if (oper) {
1973 						struct sctp_paramhdr *ph;
1974 						uint32_t *ippp;
1975 
1976 						SCTP_BUF_LEN(oper) =
1977 						    sizeof(struct sctp_paramhdr) +
1978 						    (3 * sizeof(uint32_t));
1979 						ph = mtod(oper,
1980 						    struct sctp_paramhdr *);
1981 						ph->param_type =
1982 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1983 						ph->param_length =
1984 						    htons(SCTP_BUF_LEN(oper));
1985 						ippp = (uint32_t *) (ph + 1);
1986 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1987 						ippp++;
1988 						*ippp = tsn;
1989 						ippp++;
1990 						*ippp = ((strmno << 16) | strmseq);
1991 					}
1992 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1993 					sctp_abort_an_association(stcb->sctp_ep,
1994 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1995 
1996 					*abort_flag = 1;
1997 					return (0);
1998 				}
1999 			}
2000 		} else {
2001 			/* No PDAPI running */
2002 			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
2003 				/*
2004 				 * Reassembly queue is NOT empty; validate
2005 				 * that this tsn does not need to be on the
2006 				 * reassembly queue. If it does, then our peer
2007 				 * is broken or evil.
2008 				 */
2009 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2010 					sctp_m_freem(control->data);
2011 					control->data = NULL;
2012 					if (control->whoFrom) {
2013 						sctp_free_remote_addr(control->whoFrom);
2014 						control->whoFrom = NULL;
2015 					}
2016 					sctp_free_a_readq(stcb, control);
2017 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2018 					    0, M_DONTWAIT, 1, MT_DATA);
2019 					if (oper) {
2020 						struct sctp_paramhdr *ph;
2021 						uint32_t *ippp;
2022 
2023 						SCTP_BUF_LEN(oper) =
2024 						    sizeof(struct sctp_paramhdr) +
2025 						    (3 * sizeof(uint32_t));
2026 						ph = mtod(oper,
2027 						    struct sctp_paramhdr *);
2028 						ph->param_type =
2029 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2030 						ph->param_length =
2031 						    htons(SCTP_BUF_LEN(oper));
2032 						ippp = (uint32_t *) (ph + 1);
2033 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2034 						ippp++;
2035 						*ippp = tsn;
2036 						ippp++;
2037 						*ippp = ((strmno << 16) | strmseq);
2038 					}
2039 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2040 					sctp_abort_an_association(stcb->sctp_ep,
2041 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2042 
2043 					*abort_flag = 1;
2044 					return (0);
2045 				}
2046 			}
2047 		}
2048 		/* ok, if we reach here we have passed the sanity checks */
2049 		if (chunk_flags & SCTP_DATA_UNORDERED) {
2050 			/* queue directly into socket buffer */
2051 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2052 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2053 			    control,
2054 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2055 		} else {
2056 			/*
2057 			 * Special check for when streams are resetting. We
2058 			 * could be smarter about this and check the actual
2059 			 * stream to see if it is not being reset.. that way
2060 			 * we would not create HOLB (head-of-line blocking)
2061 			 * between streams being reset and those not.
2062 			 *
2063 			 * We take complete messages that have a stream reset
2064 			 * intervening (aka the TSN is after where our
2065 			 * cum-ack needs to be) off and put them on a
2066 			 * pending_reply_queue. The reassembly ones we do
2067 			 * not have to worry about since they are all sorted
2068 			 * and processed by TSN order. It is only the
2069 			 * singletons I must worry about.
2070 			 */
2071 			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2072 			    ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)))
2073 			    ) {
2074 				/*
2075 				 * yep, it's past where we need to reset... go
2076 				 * ahead and queue it.
2077 				 */
2078 				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2079 					/* first one on */
2080 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2081 				} else {
2082 					struct sctp_queued_to_read *ctlOn;
2083 					unsigned char inserted = 0;
2084 
2085 					ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
2086 					while (ctlOn) {
2087 						if (compare_with_wrap(control->sinfo_tsn,
2088 						    ctlOn->sinfo_tsn, MAX_TSN)) {
2089 							ctlOn = TAILQ_NEXT(ctlOn, next);
2090 						} else {
2091 							/* found it */
2092 							TAILQ_INSERT_BEFORE(ctlOn, control, next);
2093 							inserted = 1;
2094 							break;
2095 						}
2096 					}
2097 					if (inserted == 0) {
2098 						/*
2099 						 * not inserted before any
2100 						 * existing entry, so it
2101 						 * must go at the end.
2102 						 */
2103 						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2104 					}
2105 				}
2106 			} else {
2107 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2108 				if (*abort_flag) {
2109 					return (0);
2110 				}
2111 			}
2112 		}
2113 	} else {
2114 		/* Into the re-assembly queue */
2115 		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2116 		if (*abort_flag) {
2117 			/*
2118 			 * the assoc is now gone and chk was put onto the
2119 			 * reasm queue, which has all been freed.
2120 			 */
2121 			*m = NULL;
2122 			return (0);
2123 		}
2124 	}
2125 finish_express_del:
2126 	if (tsn == (asoc->cumulative_tsn + 1)) {
2127 		/* Update cum-ack */
2128 		asoc->cumulative_tsn = tsn;
2129 	}
2130 	if (last_chunk) {
2131 		*m = NULL;
2132 	}
2133 	if (ordered) {
2134 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2135 	} else {
2136 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2137 	}
2138 	SCTP_STAT_INCR(sctps_recvdata);
2139 	/* Set it present please */
2140 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2141 		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2142 	}
2143 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2144 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2145 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2146 	}
2147 	/* check the special flag for stream resets */
2148 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2149 	    ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
2150 	    (asoc->cumulative_tsn == liste->tsn))
2151 	    ) {
2152 		/*
2153 		 * we have finished working through the backlogged TSNs; now
2154 		 * it is time to reset streams. 1: call reset function. 2:
2155 		 * free pending_reply space. 3: distribute any chunks in
2156 		 * the pending_reply_queue.
2157 		 */
2158 		struct sctp_queued_to_read *ctl;
2159 
2160 		sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2161 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2162 		SCTP_FREE(liste, SCTP_M_STRESET);
2163 		/* sa_ignore FREED_MEMORY */
2164 		liste = TAILQ_FIRST(&asoc->resetHead);
2165 		ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2166 		if (ctl && (liste == NULL)) {
2167 			/* All can be removed */
2168 			while (ctl) {
2169 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2170 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2171 				if (*abort_flag) {
2172 					return (0);
2173 				}
2174 				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2175 			}
2176 		} else if (ctl) {
2177 			/* more than one in queue */
2178 			while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
2179 				/*
2180 				 * if ctl->sinfo_tsn is <= liste->tsn we can
2181 				 * process it, which is the negation of
2182 				 * ctl->sinfo_tsn > liste->tsn
2183 				 */
2184 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2185 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2186 				if (*abort_flag) {
2187 					return (0);
2188 				}
2189 				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2190 			}
2191 		}
2192 		/*
2193 		 * Now service re-assembly to pick up anything that has been
2194 		 * held on the reassembly queue.
2195 		 */
2196 		sctp_deliver_reasm_check(stcb, asoc);
2197 		need_reasm_check = 0;
2198 	}
2199 	if (need_reasm_check) {
2200 		/* Another one waits ? */
2201 		sctp_deliver_reasm_check(stcb, asoc);
2202 	}
2203 	return (1);
2204 }
2205 
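/*
 * For each byte value, the number of consecutive 1-bits starting at the
 * least significant bit; e.g. sctp_map_lookup_tab[0x0f] = 4. Used below
 * to count the in-order run length within a mapping array byte.
 */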
2206 int8_t sctp_map_lookup_tab[256] = {
2207 	0, 1, 0, 2, 0, 1, 0, 3,
2208 	0, 1, 0, 2, 0, 1, 0, 4,
2209 	0, 1, 0, 2, 0, 1, 0, 3,
2210 	0, 1, 0, 2, 0, 1, 0, 5,
2211 	0, 1, 0, 2, 0, 1, 0, 3,
2212 	0, 1, 0, 2, 0, 1, 0, 4,
2213 	0, 1, 0, 2, 0, 1, 0, 3,
2214 	0, 1, 0, 2, 0, 1, 0, 6,
2215 	0, 1, 0, 2, 0, 1, 0, 3,
2216 	0, 1, 0, 2, 0, 1, 0, 4,
2217 	0, 1, 0, 2, 0, 1, 0, 3,
2218 	0, 1, 0, 2, 0, 1, 0, 5,
2219 	0, 1, 0, 2, 0, 1, 0, 3,
2220 	0, 1, 0, 2, 0, 1, 0, 4,
2221 	0, 1, 0, 2, 0, 1, 0, 3,
2222 	0, 1, 0, 2, 0, 1, 0, 7,
2223 	0, 1, 0, 2, 0, 1, 0, 3,
2224 	0, 1, 0, 2, 0, 1, 0, 4,
2225 	0, 1, 0, 2, 0, 1, 0, 3,
2226 	0, 1, 0, 2, 0, 1, 0, 5,
2227 	0, 1, 0, 2, 0, 1, 0, 3,
2228 	0, 1, 0, 2, 0, 1, 0, 4,
2229 	0, 1, 0, 2, 0, 1, 0, 3,
2230 	0, 1, 0, 2, 0, 1, 0, 6,
2231 	0, 1, 0, 2, 0, 1, 0, 3,
2232 	0, 1, 0, 2, 0, 1, 0, 4,
2233 	0, 1, 0, 2, 0, 1, 0, 3,
2234 	0, 1, 0, 2, 0, 1, 0, 5,
2235 	0, 1, 0, 2, 0, 1, 0, 3,
2236 	0, 1, 0, 2, 0, 1, 0, 4,
2237 	0, 1, 0, 2, 0, 1, 0, 3,
2238 	0, 1, 0, 2, 0, 1, 0, 8
2239 };
2240 
2241 
2242 void
2243 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2244 {
2245 	/*
2246 	 * Now we also need to check the mapping array in a couple of ways:
2247 	 * did we move the cum-ack point, and can we slide the array down?
2248 	 */
2249 	struct sctp_association *asoc;
2250 	int at;
2251 	int slide_from, slide_end, lgap, distance;
2252 
2257 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2258 
2259 	asoc = &stcb->asoc;
2260 	at = 0;
2261 
2262 	old_cumack = asoc->cumulative_tsn;
2263 	old_base = asoc->mapping_array_base_tsn;
2264 	old_highest = asoc->highest_tsn_inside_map;
2265 	/*
2266 	 * We could probably improve this a small bit by calculating the
2267 	 * offset of the current cum-ack as the starting point.
2268 	 */
2269 	at = 0;
2270 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2271 		if (asoc->nr_mapping_array[slide_from] == 0xff) {
2272 			at += 8;
2273 		} else {
2274 			/* there is a 0 bit */
2275 			at += sctp_map_lookup_tab[asoc->nr_mapping_array[slide_from]];
2276 			break;
2277 		}
2278 	}
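	/*
	 * at now counts the leading run of set bits, so the cum-ack is the
	 * last TSN of that run; e.g. if the first 10 bits are set, the
	 * cum-ack becomes mapping_array_base_tsn + 9.
	 */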
2279 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2280 
2281 	if (compare_with_wrap(asoc->cumulative_tsn, asoc->highest_tsn_inside_map, MAX_TSN) &&
2282 	    compare_with_wrap(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
2283 #ifdef INVARIANTS
2284 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2285 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2286 #else
2287 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2288 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2289 		sctp_print_mapping_array(asoc);
2290 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2291 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2292 		}
2293 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2294 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2295 #endif
2296 	}
2297 	if (compare_with_wrap(asoc->highest_tsn_inside_nr_map,
2298 	    asoc->highest_tsn_inside_map,
2299 	    MAX_TSN)) {
2300 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2301 	} else {
2302 		highest_tsn = asoc->highest_tsn_inside_map;
2303 	}
2304 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2305 		/* The complete array was completed by a single FR */
2306 		/* highest becomes the cum-ack */
2307 		int clr;
2308 
2309 #ifdef INVARIANTS
2310 		unsigned int i;
2311 
2312 #endif
2313 
2314 		/* clear the array */
2315 		clr = ((at + 7) >> 3);
2316 		if (clr > asoc->mapping_array_size) {
2317 			clr = asoc->mapping_array_size;
2318 		}
2319 		memset(asoc->mapping_array, 0, clr);
2320 		memset(asoc->nr_mapping_array, 0, clr);
2321 #ifdef INVARIANTS
2322 		for (i = 0; i < asoc->mapping_array_size; i++) {
2323 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2324 				printf("Error: mapping arrays not clean at clear\n");
2325 				sctp_print_mapping_array(asoc);
2326 			}
2327 		}
2328 #endif
2329 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2330 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2331 	} else if (at >= 8) {
2332 		/* we can slide the mapping array down */
2333 		/* slide_from holds where we hit the first NON 0xff byte */
2334 
2335 		/*
2336 		 * now calculate the ceiling of the move using our highest
2337 		 * TSN value
2338 		 */
2339 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2340 		slide_end = (lgap >> 3);
2341 		if (slide_end < slide_from) {
2342 			sctp_print_mapping_array(asoc);
2343 #ifdef INVARIANTS
2344 			panic("impossible slide");
2345 #else
2346 			printf("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2347 			    lgap, slide_end, slide_from, at);
2348 			return;
2349 #endif
2350 		}
2351 		if (slide_end > asoc->mapping_array_size) {
2352 #ifdef INVARIANTS
2353 			panic("would overrun buffer");
2354 #else
2355 			printf("Gak, would have overrun map end:%d slide_end:%d\n",
2356 			    asoc->mapping_array_size, slide_end);
2357 			slide_end = asoc->mapping_array_size;
2358 #endif
2359 		}
2360 		distance = (slide_end - slide_from) + 1;
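		/*
		 * distance is the number of map bytes, slide_from through
		 * slide_end, that still hold unacked state and must be
		 * preserved when the arrays are shifted down.
		 */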
2361 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2362 			sctp_log_map(old_base, old_cumack, old_highest,
2363 			    SCTP_MAP_PREPARE_SLIDE);
2364 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2365 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2366 		}
2367 		if (distance + slide_from > asoc->mapping_array_size ||
2368 		    distance < 0) {
2369 			/*
2370 			 * Here we do NOT slide forward the array so that
2371 			 * hopefully when more data comes in to fill it up
2372 			 * we will be able to slide it forward. Really I
2373 			 * don't think this should happen :-0
2374 			 */
2375 
2376 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2377 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2378 				    (uint32_t) asoc->mapping_array_size,
2379 				    SCTP_MAP_SLIDE_NONE);
2380 			}
2381 		} else {
2382 			int ii;
2383 
2384 			for (ii = 0; ii < distance; ii++) {
2385 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2386 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2387 
2388 			}
2389 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2390 				asoc->mapping_array[ii] = 0;
2391 				asoc->nr_mapping_array[ii] = 0;
2392 			}
2393 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2394 				asoc->highest_tsn_inside_map += (slide_from << 3);
2395 			}
2396 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2397 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2398 			}
2399 			asoc->mapping_array_base_tsn += (slide_from << 3);
2400 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2401 				sctp_log_map(asoc->mapping_array_base_tsn,
2402 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2403 				    SCTP_MAP_SLIDE_RESULT);
2404 			}
2405 		}
2406 	}
2407 }
2408 
2409 
2410 void
2411 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap, int *abort_flag)
2412 {
2413 	struct sctp_association *asoc;
2414 	uint32_t highest_tsn;
2415 
2416 	asoc = &stcb->asoc;
2417 	if (compare_with_wrap(asoc->highest_tsn_inside_nr_map,
2418 	    asoc->highest_tsn_inside_map,
2419 	    MAX_TSN)) {
2420 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2421 	} else {
2422 		highest_tsn = asoc->highest_tsn_inside_map;
2423 	}
2424 
2425 	/*
2426 	 * Now we need to see if we need to queue a sack or just start the
2427 	 * timer (if allowed).
2428 	 */
2429 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2430 		/*
2431 		 * Ok, special case: in the SHUTDOWN-SENT state we make
2432 		 * sure the SACK timer is off and instead send a SHUTDOWN
2433 		 * and a SACK
2434 		 */
2435 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2436 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2437 			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2438 		}
2439 		sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2440 		sctp_send_sack(stcb);
2441 	} else {
2442 		int is_a_gap;
2443 
2444 		/* is there a gap now ? */
2445 		is_a_gap = compare_with_wrap(highest_tsn, stcb->asoc.cumulative_tsn, MAX_TSN);
2446 
2447 		/*
2448 		 * CMT DAC algorithm: increase number of packets received
2449 		 * since last ack
2450 		 */
2451 		stcb->asoc.cmt_dac_pkts_rcvd++;
2452 
2453 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2454 							 * SACK */
2455 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2456 							 * longer is one */
2457 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2458 		    (is_a_gap) ||	/* is still a gap */
2459 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2460 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2461 		    ) {
2462 
2463 			if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off)) &&
2464 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2465 			    (stcb->asoc.send_sack == 0) &&
2466 			    (stcb->asoc.numduptsns == 0) &&
2467 			    (stcb->asoc.delayed_ack) &&
2468 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2469 
2470 				/*
2471 				 * CMT DAC algorithm: With CMT, delay acks
2472 				 * even in the face of reordering.
2473 				 *
2474 				 * Therefore, acks that do not have to be
2475 				 * sent because of the above reasons will be
2476 				 * delayed. That is, acks that would have
2477 				 * been sent due to gap reports will be
2478 				 * delayed with DAC. Start the delayed ack
2479 				 * timer.
2480 				 */
2481 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2482 				    stcb->sctp_ep, stcb, NULL);
2483 			} else {
2484 				/*
2485 				 * Ok, we must build a SACK: the timer
2486 				 * is pending, we got our first packet, OR
2487 				 * there are gaps or duplicates.
2488 				 */
2489 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2490 				sctp_send_sack(stcb);
2491 			}
2492 		} else {
2493 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2494 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2495 				    stcb->sctp_ep, stcb, NULL);
2496 			}
2497 		}
2498 	}
2499 }
2500 
2501 void
2502 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2503 {
2504 	struct sctp_tmit_chunk *chk;
2505 	uint32_t tsize, pd_point;
2506 	uint16_t nxt_todel;
2507 
2508 	if (asoc->fragmented_delivery_inprogress) {
2509 		sctp_service_reassembly(stcb, asoc);
2510 	}
2511 	/* Can we proceed further, i.e. is the PD-API complete? */
2512 	if (asoc->fragmented_delivery_inprogress) {
2513 		/* no */
2514 		return;
2515 	}
2516 	/*
2517 	 * Now is there some other chunk I can deliver from the reassembly
2518 	 * queue.
2519 	 */
2520 doit_again:
2521 	chk = TAILQ_FIRST(&asoc->reasmqueue);
2522 	if (chk == NULL) {
2523 		asoc->size_on_reasm_queue = 0;
2524 		asoc->cnt_on_reasm_queue = 0;
2525 		return;
2526 	}
2527 	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2528 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2529 	    ((nxt_todel == chk->rec.data.stream_seq) ||
2530 	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2531 		/*
2532 		 * Yep the first one is here. We setup to start reception,
2533 		 * Yep, the first one is here. We set up to start reception,
2534 		 */
2535 
2536 		/*
2537 		 * Before we start, though, either all of the message must
2538 		 * be here, or enough of it to reach the partial delivery
2539 		 * point (bounded by the socket buffer limit).
2540 		 */
2541 		if (stcb->sctp_socket) {
2542 			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
2543 			    stcb->sctp_ep->partial_delivery_point);
2544 		} else {
2545 			pd_point = stcb->sctp_ep->partial_delivery_point;
2546 		}
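		/*
		 * Deliver only if the whole message is queued, or enough of
		 * it has arrived to reach the partial delivery point.
		 */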
2547 		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2548 			asoc->fragmented_delivery_inprogress = 1;
2549 			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2550 			asoc->str_of_pdapi = chk->rec.data.stream_number;
2551 			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2552 			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2553 			asoc->fragment_flags = chk->rec.data.rcv_flags;
2554 			sctp_service_reassembly(stcb, asoc);
2555 			if (asoc->fragmented_delivery_inprogress == 0) {
2556 				goto doit_again;
2557 			}
2558 		}
2559 	}
2560 }
2561 
2562 int
2563 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2564     struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2565     struct sctp_nets *net, uint32_t * high_tsn)
2566 {
2567 	struct sctp_data_chunk *ch, chunk_buf;
2568 	struct sctp_association *asoc;
2569 	int num_chunks = 0;	/* number of control chunks processed */
2570 	int stop_proc = 0;
2571 	int chk_length, break_flag, last_chunk;
2572 	int abort_flag = 0, was_a_gap = 0;
2573 	struct mbuf *m;
2574 
2575 	/* set the rwnd */
2576 	sctp_set_rwnd(stcb, &stcb->asoc);
2577 
2578 	m = *mm;
2579 	SCTP_TCB_LOCK_ASSERT(stcb);
2580 	asoc = &stcb->asoc;
2581 	if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2582 	    stcb->asoc.cumulative_tsn, MAX_TSN)) {
2583 		/* there was a gap before this data was processed */
2584 		was_a_gap = 1;
2585 	}
2586 	/*
2587 	 * setup where we got the last DATA packet from for any SACK that
2588 	 * may need to go out. Don't bump the net. This is done ONLY when a
2589 	 * chunk is assigned.
2590 	 */
2591 	asoc->last_data_chunk_from = net;
2592 
2593 	/*-
2594 	 * Now before we proceed we must figure out if this is a wasted
2595 	 * cluster... i.e. it is a small packet sent in and yet the driver
2596 	 * underneath allocated a full cluster for it. If so we must copy it
2597 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2598 	 * with cluster starvation. Note for __Panda__ we don't do this
2599 	 * since it has clusters all the way down to 64 bytes.
2600 	 */
2601 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2602 		/* we only handle mbufs that are singletons.. not chains */
2603 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2604 		if (m) {
2605 			/* ok, let's see if we can copy the data up */
2606 			caddr_t *from, *to;
2607 
2608 			/* get the pointers and copy */
2609 			to = mtod(m, caddr_t *);
2610 			from = mtod((*mm), caddr_t *);
2611 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2612 			/* copy the length and free up the old */
2613 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2614 			sctp_m_freem(*mm);
2615 			/* success, back copy */
2616 			*mm = m;
2617 		} else {
2618 			/* We are in trouble in the mbuf world .. yikes */
2619 			m = *mm;
2620 		}
2621 	}
2622 	/* get pointer to the first chunk header */
2623 	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2624 	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2625 	if (ch == NULL) {
2626 		return (1);
2627 	}
2628 	/*
2629 	 * process all DATA chunks...
2630 	 */
2631 	*high_tsn = asoc->cumulative_tsn;
2632 	break_flag = 0;
2633 	asoc->data_pkts_seen++;
2634 	while (stop_proc == 0) {
2635 		/* validate chunk length */
2636 		chk_length = ntohs(ch->ch.chunk_length);
2637 		if (length - *offset < chk_length) {
2638 			/* all done, mutilated chunk */
2639 			stop_proc = 1;
2640 			break;
2641 		}
2642 		if (ch->ch.chunk_type == SCTP_DATA) {
2643 			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2644 				/*
2645 				 * Need to send an abort since we had an
2646 				 * invalid data chunk.
2647 				 */
2648 				struct mbuf *op_err;
2649 
2650 				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2651 				    0, M_DONTWAIT, 1, MT_DATA);
2652 
2653 				if (op_err) {
2654 					struct sctp_paramhdr *ph;
2655 					uint32_t *ippp;
2656 
2657 					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2658 					    (2 * sizeof(uint32_t));
2659 					ph = mtod(op_err, struct sctp_paramhdr *);
2660 					ph->param_type =
2661 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2662 					ph->param_length = htons(SCTP_BUF_LEN(op_err));
2663 					ippp = (uint32_t *) (ph + 1);
2664 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2665 					ippp++;
2666 					*ippp = asoc->cumulative_tsn;
2667 
2668 				}
2669 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2670 				sctp_abort_association(inp, stcb, m, iphlen, sh,
2671 				    op_err, 0, net->port);
2672 				return (2);
2673 			}
2674 #ifdef SCTP_AUDITING_ENABLED
2675 			sctp_audit_log(0xB1, 0);
2676 #endif
2677 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2678 				last_chunk = 1;
2679 			} else {
2680 				last_chunk = 0;
2681 			}
2682 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2683 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2684 			    last_chunk)) {
2685 				num_chunks++;
2686 			}
2687 			if (abort_flag)
2688 				return (2);
2689 
2690 			if (break_flag) {
2691 				/*
2692 				 * Set because of out of rwnd space and no
2693 				 * drop rep space left.
2694 				 */
2695 				stop_proc = 1;
2696 				break;
2697 			}
2698 		} else {
2699 			/* not a data chunk in the data region */
2700 			switch (ch->ch.chunk_type) {
2701 			case SCTP_INITIATION:
2702 			case SCTP_INITIATION_ACK:
2703 			case SCTP_SELECTIVE_ACK:
2704 			case SCTP_NR_SELECTIVE_ACK:	/* EY */
2705 			case SCTP_HEARTBEAT_REQUEST:
2706 			case SCTP_HEARTBEAT_ACK:
2707 			case SCTP_ABORT_ASSOCIATION:
2708 			case SCTP_SHUTDOWN:
2709 			case SCTP_SHUTDOWN_ACK:
2710 			case SCTP_OPERATION_ERROR:
2711 			case SCTP_COOKIE_ECHO:
2712 			case SCTP_COOKIE_ACK:
2713 			case SCTP_ECN_ECHO:
2714 			case SCTP_ECN_CWR:
2715 			case SCTP_SHUTDOWN_COMPLETE:
2716 			case SCTP_AUTHENTICATION:
2717 			case SCTP_ASCONF_ACK:
2718 			case SCTP_PACKET_DROPPED:
2719 			case SCTP_STREAM_RESET:
2720 			case SCTP_FORWARD_CUM_TSN:
2721 			case SCTP_ASCONF:
2722 				/*
2723 				 * Now, what do we do with KNOWN chunks that
2724 				 * are NOT in the right place?
2725 				 *
2726 				 * For now, I do nothing but ignore them. We
2727 				 * may later want to add sysctl stuff to
2728 				 * switch out and do either an ABORT() or
2729 				 * possibly process them.
2730 				 */
2731 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2732 					struct mbuf *op_err;
2733 
2734 					op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2735 					sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
2736 					return (2);
2737 				}
2738 				break;
2739 			default:
2740 				/* unknown chunk type, use bit rules */
2741 				if (ch->ch.chunk_type & 0x40) {
2742 					/* Add an error report to the queue */
2743 					struct mbuf *merr;
2744 					struct sctp_paramhdr *phd;
2745 
2746 					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2747 					if (merr) {
2748 						phd = mtod(merr, struct sctp_paramhdr *);
2749 						/*
2750 						 * We cheat and use param
2751 						 * type since we did not
2752 						 * bother to define an error
2753 						 * cause struct. They are
2754 						 * the same basic format
2755 						 * with different names.
2756 						 */
2757 						phd->param_type =
2758 						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
2759 						phd->param_length =
2760 						    htons(chk_length + sizeof(*phd));
2761 						SCTP_BUF_LEN(merr) = sizeof(*phd);
2762 						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
2763 						    SCTP_SIZE32(chk_length),
2764 						    M_DONTWAIT);
2765 						if (SCTP_BUF_NEXT(merr)) {
2766 							sctp_queue_op_err(stcb, merr);
2767 						} else {
2768 							sctp_m_freem(merr);
2769 						}
2770 					}
2771 				}
2772 				if ((ch->ch.chunk_type & 0x80) == 0) {
2773 					/* discard the rest of this packet */
2774 					stop_proc = 1;
2775 				}	/* else skip this bad chunk and
2776 					 * continue... */
2777 				break;
2778 			}	/* switch on chunk type */
2779 		}
2780 		*offset += SCTP_SIZE32(chk_length);
2781 		if ((*offset >= length) || stop_proc) {
2782 			/* no more data left in the mbuf chain */
2783 			stop_proc = 1;
2784 			continue;
2785 		}
2786 		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2787 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2788 		if (ch == NULL) {
2789 			*offset = length;
2790 			stop_proc = 1;
2791 			break;
2792 
2793 		}
2794 	}			/* while */
2795 	if (break_flag) {
2796 		/*
2797 		 * we need to report rwnd overrun drops.
2798 		 */
2799 		sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2800 	}
2801 	if (num_chunks) {
2802 		/*
2803 		 * Did we get data, if so update the time for auto-close and
2804 		 * give peer credit for being alive.
2805 		 */
2806 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2807 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2808 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2809 			    stcb->asoc.overall_error_count,
2810 			    0,
2811 			    SCTP_FROM_SCTP_INDATA,
2812 			    __LINE__);
2813 		}
2814 		stcb->asoc.overall_error_count = 0;
2815 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2816 	}
2817 	/* now service all of the reassm queue if needed */
2818 	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2819 		sctp_service_queues(stcb, asoc);
2820 
2821 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2822 		/* Assure that we ack right away */
2823 		stcb->asoc.send_sack = 1;
2824 	}
2825 	/* Start a sack timer or QUEUE a SACK for sending */
2826 	sctp_sack_check(stcb, was_a_gap, &abort_flag);
2827 	if (abort_flag)
2828 		return (2);
2829 
2830 	return (0);
2831 }
2832 
2833 static int
2834 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2835     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2836     int *num_frs,
2837     uint32_t * biggest_newly_acked_tsn,
2838     uint32_t * this_sack_lowest_newack,
2839     int *ecn_seg_sums)
2840 {
2841 	struct sctp_tmit_chunk *tp1;
2842 	unsigned int theTSN;
2843 	int j, wake_him = 0, circled = 0;
2844 
2845 	/* Recover the tp1 we last saw */
2846 	tp1 = *p_tp1;
2847 	if (tp1 == NULL) {
2848 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2849 	}
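	/*
	 * Gap ack block offsets are relative to the SACK's cumulative TSN,
	 * so the TSNs acked here run from last_tsn + frag_strt through
	 * last_tsn + frag_end.
	 */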
2850 	for (j = frag_strt; j <= frag_end; j++) {
2851 		theTSN = j + last_tsn;
2852 		while (tp1) {
2853 			if (tp1->rec.data.doing_fast_retransmit)
2854 				(*num_frs) += 1;
2855 
2856 			/*-
2857 			 * CMT: CUCv2 algorithm. For each TSN being
2858 			 * processed from the sent queue, track the
2859 			 * next expected pseudo-cumack, or
2860 			 * rtx_pseudo_cumack, if required. Separate
2861 			 * cumack trackers for first transmissions,
2862 			 * and retransmissions.
2863 			 */
2864 			if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2865 			    (tp1->snd_count == 1)) {
2866 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2867 				tp1->whoTo->find_pseudo_cumack = 0;
2868 			}
2869 			if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2870 			    (tp1->snd_count > 1)) {
2871 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2872 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2873 			}
2874 			if (tp1->rec.data.TSN_seq == theTSN) {
2875 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2876 					/*-
2877 					 * must be held until
2878 					 * cum-ack passes
2879 					 */
2880 					/*-
2881 					 * ECN Nonce: Add the nonce
2882 					 * value to the sender's
2883 					 * nonce sum
2884 					 */
2885 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2886 						/*-
2887 						 * If it is less than RESEND, it is
2888 						 * now no-longer in flight.
2889 						 * Higher values may already be set
2890 						 * via previous Gap Ack Blocks...
2891 						 * i.e. ACKED or RESEND.
2892 						 */
2893 						if (compare_with_wrap(tp1->rec.data.TSN_seq,
2894 						    *biggest_newly_acked_tsn, MAX_TSN)) {
2895 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2896 						}
2897 						/*-
2898 						 * CMT: SFR algo (and HTNA) - set
2899 						 * saw_newack to 1 for dest being
2900 						 * newly acked. update
2901 						 * this_sack_highest_newack if
2902 						 * appropriate.
2903 						 */
2904 						if (tp1->rec.data.chunk_was_revoked == 0)
2905 							tp1->whoTo->saw_newack = 1;
2906 
2907 						if (compare_with_wrap(tp1->rec.data.TSN_seq,
2908 						    tp1->whoTo->this_sack_highest_newack,
2909 						    MAX_TSN)) {
2910 							tp1->whoTo->this_sack_highest_newack =
2911 							    tp1->rec.data.TSN_seq;
2912 						}
2913 						/*-
2914 						 * CMT DAC algo: also update
2915 						 * this_sack_lowest_newack
2916 						 */
2917 						if (*this_sack_lowest_newack == 0) {
2918 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2919 								sctp_log_sack(*this_sack_lowest_newack,
2920 								    last_tsn,
2921 								    tp1->rec.data.TSN_seq,
2922 								    0,
2923 								    0,
2924 								    SCTP_LOG_TSN_ACKED);
2925 							}
2926 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2927 						}
2928 						/*-
2929 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2930 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2931 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2932 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2933 						 * Separate pseudo_cumack trackers for first transmissions and
2934 						 * retransmissions.
2935 						 */
2936 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2937 							if (tp1->rec.data.chunk_was_revoked == 0) {
2938 								tp1->whoTo->new_pseudo_cumack = 1;
2939 							}
2940 							tp1->whoTo->find_pseudo_cumack = 1;
2941 						}
2942 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2943 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2944 						}
2945 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2946 							if (tp1->rec.data.chunk_was_revoked == 0) {
2947 								tp1->whoTo->new_pseudo_cumack = 1;
2948 							}
2949 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2950 						}
2951 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2952 							sctp_log_sack(*biggest_newly_acked_tsn,
2953 							    last_tsn,
2954 							    tp1->rec.data.TSN_seq,
2955 							    frag_strt,
2956 							    frag_end,
2957 							    SCTP_LOG_TSN_ACKED);
2958 						}
2959 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2960 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2961 							    tp1->whoTo->flight_size,
2962 							    tp1->book_size,
2963 							    (uintptr_t) tp1->whoTo,
2964 							    tp1->rec.data.TSN_seq);
2965 						}
2966 						sctp_flight_size_decrease(tp1);
2967 						sctp_total_flight_decrease(stcb, tp1);
2968 
2969 						tp1->whoTo->net_ack += tp1->send_size;
2970 						if (tp1->snd_count < 2) {
2971 							/*-
2972 							 * True non-retransmitted chunk
2973 							 */
2974 							tp1->whoTo->net_ack2 += tp1->send_size;
2975 
2976 							/*-
2977 							 * update RTO too ?
2978 							 */
2979 							if (tp1->do_rtt) {
2980 								tp1->whoTo->RTO =
2981 								    sctp_calculate_rto(stcb,
2982 								    &stcb->asoc,
2983 								    tp1->whoTo,
2984 								    &tp1->sent_rcv_time,
2985 								    sctp_align_safe_nocopy);
2986 								tp1->do_rtt = 0;
2987 							}
2988 						}
2989 					}
2990 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2991 						(*ecn_seg_sums) += tp1->rec.data.ect_nonce;
2992 						(*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
2993 						if (compare_with_wrap(tp1->rec.data.TSN_seq,
2994 						    stcb->asoc.this_sack_highest_gap,
2995 						    MAX_TSN)) {
2996 							stcb->asoc.this_sack_highest_gap =
2997 							    tp1->rec.data.TSN_seq;
2998 						}
2999 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3000 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3001 #ifdef SCTP_AUDITING_ENABLED
3002 							sctp_audit_log(0xB2,
3003 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3004 #endif
3005 						}
3006 					}
3007 					/*-
3008 					 * All chunks NOT UNSENT fall through here and are marked
3009 					 * (leave PR-SCTP ones that are to skip alone though)
3010 					 */
3011 					if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
3012 						tp1->sent = SCTP_DATAGRAM_MARKED;
3013 
3014 					if (tp1->rec.data.chunk_was_revoked) {
3015 						/* deflate the cwnd */
3016 						tp1->whoTo->cwnd -= tp1->book_size;
3017 						tp1->rec.data.chunk_was_revoked = 0;
3018 					}
3019 					/* NR Sack code here */
3020 					if (nr_sacking) {
3021 						if (tp1->data) {
3022 							/*
3023 							 * sa_ignore
3024 							 * NO_NULL_CHK
3025 							 */
3026 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3027 							sctp_m_freem(tp1->data);
3028 							tp1->data = NULL;
3029 						}
3030 						wake_him++;
3031 					}
3032 				}
3033 				break;
3034 			}	/* if (tp1->TSN_seq == theTSN) */
3035 			if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
3036 			    MAX_TSN))
3037 				break;
3038 
3039 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3040 			if ((tp1 == NULL) && (circled == 0)) {
3041 				circled++;
3042 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3043 			}
3044 		}		/* end while (tp1) */
3045 		if (tp1 == NULL) {
3046 			circled = 0;
3047 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3048 		}
3049 		/* In case the fragments were not in order we must reset */
3050 	}			/* end for (j = fragStart */
3051 	*p_tp1 = tp1;
3052 	return (wake_him);	/* Return value only used for nr-sack */
3053 }
3054 
3055 
3056 static int
3057 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3058     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3059     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3060     int num_seg, int num_nr_seg, int *ecn_seg_sums)
3061 {
3062 	struct sctp_gap_ack_block *frag, block;
3063 	struct sctp_tmit_chunk *tp1;
3064 	int i;
3065 	int num_frs = 0;
3066 	int chunk_freed;
3067 	int non_revocable;
3068 	uint16_t frag_strt, frag_end;
3069 	uint32_t last_frag_high;
3070 
3071 	tp1 = NULL;
3072 	last_frag_high = 0;
3073 	chunk_freed = 0;
3074 
3075 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3076 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3077 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3078 		*offset += sizeof(block);
3079 		if (frag == NULL) {
3080 			return (chunk_freed);
3081 		}
3082 		frag_strt = ntohs(frag->start);
3083 		frag_end = ntohs(frag->end);
3084 		/* some sanity checks on the fragment offsets */
3085 		if (frag_strt > frag_end) {
3086 			/* this one is malformed, skip */
3087 			continue;
3088 		}
3089 		if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
3090 		    MAX_TSN))
3091 			*biggest_tsn_acked = frag_end + last_tsn;
3092 
3093 		/* mark acked dgs and find out the highestTSN being acked */
3094 		if (tp1 == NULL) {
3095 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3096 			/* save the locations of the last frags */
3097 			last_frag_high = frag_end + last_tsn;
3098 		} else {
3099 			/*
3100 			 * now let's see if we need to reset the queue due to
3101 			 * an out-of-order SACK fragment
3102 			 */
3103 			if (compare_with_wrap(frag_strt + last_tsn,
3104 			    last_frag_high, MAX_TSN)) {
3105 				/*
3106 				 * if the new frag starts after the last TSN
3107 				 * frag covered, we are ok and this one is
3108 				 * beyond the last one
3109 				 */
3110 				;
3111 			} else {
3112 				/*
3113 				 * ok, they have reset us, so we need to
3114 				 * reset the queue. This will cause extra
3115 				 * hunting, but hey, they chose the
3116 				 * performance hit when they failed to order
3117 				 * their gaps
3118 				 */
3119 				tp1 = TAILQ_FIRST(&asoc->sent_queue);
3120 			}
3121 			last_frag_high = frag_end + last_tsn;
3122 		}
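		/*
		 * The first num_seg blocks are ordinary (revocable) gap
		 * acks; the remaining num_nr_seg blocks are NR gap acks,
		 * whose chunks may be freed immediately.
		 */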
3123 		if (i < num_seg) {
3124 			non_revocable = 0;
3125 		} else {
3126 			non_revocable = 1;
3127 		}
3128 		if (i == num_seg) {
3129 			tp1 = NULL;
3130 		}
3131 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3132 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3133 		    this_sack_lowest_newack, ecn_seg_sums)) {
3134 			chunk_freed = 1;
3135 		}
3136 	}
3137 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3138 		if (num_frs)
3139 			sctp_log_fr(*biggest_tsn_acked,
3140 			    *biggest_newly_acked_tsn,
3141 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3142 	}
3143 	return (chunk_freed);
3144 }
3145 
3146 static void
3147 sctp_check_for_revoked(struct sctp_tcb *stcb,
3148     struct sctp_association *asoc, uint32_t cumack,
3149     uint32_t biggest_tsn_acked)
3150 {
3151 	struct sctp_tmit_chunk *tp1;
3152 	int tot_revoked = 0;
3153 
3154 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3155 	while (tp1) {
3156 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
3157 		    MAX_TSN)) {
3158 			/*
3159 			 * ok, this guy is either ACKED or MARKED. If it is
3160 			 * ACKED it has been previously acked, but not this
3161 			 * time, i.e. revoked. If it is MARKED it was ACK'ed
3162 			 * again.
3163 			 */
3164 			if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3165 			    MAX_TSN))
3166 				break;
3167 
3168 
3169 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3170 				/* it has been revoked */
3171 				tp1->sent = SCTP_DATAGRAM_SENT;
3172 				tp1->rec.data.chunk_was_revoked = 1;
3173 				/*
3174 				 * We must add this stuff back in to assure
3175 				 * timers and such get started.
3176 				 */
3177 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3178 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3179 					    tp1->whoTo->flight_size,
3180 					    tp1->book_size,
3181 					    (uintptr_t) tp1->whoTo,
3182 					    tp1->rec.data.TSN_seq);
3183 				}
3184 				sctp_flight_size_increase(tp1);
3185 				sctp_total_flight_increase(stcb, tp1);
3186 				/*
3187 				 * We inflate the cwnd to compensate for our
3188 				 * artificial inflation of the flight_size.
3189 				 */
3190 				tp1->whoTo->cwnd += tp1->book_size;
3191 				tot_revoked++;
3192 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3193 					sctp_log_sack(asoc->last_acked_seq,
3194 					    cumack,
3195 					    tp1->rec.data.TSN_seq,
3196 					    0,
3197 					    0,
3198 					    SCTP_LOG_TSN_REVOKED);
3199 				}
3200 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3201 				/* it has been re-acked in this SACK */
3202 				tp1->sent = SCTP_DATAGRAM_ACKED;
3203 			}
3204 		}
3205 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3206 			break;
3207 		tp1 = TAILQ_NEXT(tp1, sctp_next);
3208 	}
3209 	if (tot_revoked > 0) {
3210 		/*
3211 		 * Setup the ecn nonce re-sync point. We do this since once
3212 		 * data is revoked we begin to retransmit things, which do
3213 		 * NOT have the ECN bits set. This means we are now out of
3214 		 * sync and must wait until we get back in sync with the
3215 		 * peer to check ECN bits.
3216 		 */
3217 		tp1 = TAILQ_FIRST(&asoc->send_queue);
3218 		if (tp1 == NULL) {
3219 			asoc->nonce_resync_tsn = asoc->sending_seq;
3220 		} else {
3221 			asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
3222 		}
3223 		asoc->nonce_wait_for_ecne = 0;
3224 		asoc->nonce_sum_check = 0;
3225 	}
3226 }
3227 
3228 
3229 static void
3230 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3231     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3232 {
3233 	struct sctp_tmit_chunk *tp1;
3234 	int strike_flag = 0;
3235 	struct timeval now;
3236 	int tot_retrans = 0;
3237 	uint32_t sending_seq;
3238 	struct sctp_nets *net;
3239 	int num_dests_sacked = 0;
3240 
3241 	/*
3242 	 * Select the sending_seq: this is either the next thing ready to
3243 	 * be sent but not yet transmitted, or the next seq we will assign.
3244 	 */
3245 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3246 	if (tp1 == NULL) {
3247 		sending_seq = asoc->sending_seq;
3248 	} else {
3249 		sending_seq = tp1->rec.data.TSN_seq;
3250 	}
3251 
3252 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3253 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3254 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3255 			if (net->saw_newack)
3256 				num_dests_sacked++;
3257 		}
3258 	}
3259 	if (stcb->asoc.peer_supports_prsctp) {
3260 		(void)SCTP_GETTIME_TIMEVAL(&now);
3261 	}
3262 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3263 	while (tp1) {
3264 		strike_flag = 0;
3265 		if (tp1->no_fr_allowed) {
3266 			/* this one had a timeout or something */
3267 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3268 			continue;
3269 		}
3270 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3271 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3272 				sctp_log_fr(biggest_tsn_newly_acked,
3273 				    tp1->rec.data.TSN_seq,
3274 				    tp1->sent,
3275 				    SCTP_FR_LOG_CHECK_STRIKE);
3276 		}
3277 		if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3278 		    MAX_TSN) ||
3279 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3280 			/* done */
3281 			break;
3282 		}
3283 		if (stcb->asoc.peer_supports_prsctp) {
3284 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3285 				/* Is it expired? */
3286 				if (
3287 				/*
3288 				 * TODO sctp_constants.h needs alternative
3289 				 * time macros when _KERNEL is undefined.
3290 				 */
3291 				    (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
3292 				    ) {
3293 					/* Yes so drop it */
3294 					if (tp1->data != NULL) {
3295 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3296 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3297 						    SCTP_SO_NOT_LOCKED);
3298 					}
3299 					tp1 = TAILQ_NEXT(tp1, sctp_next);
3300 					continue;
3301 				}
3302 			}
3303 		}
3304 		if (compare_with_wrap(tp1->rec.data.TSN_seq,
3305 		    asoc->this_sack_highest_gap, MAX_TSN)) {
3306 			/* we are beyond the tsn in the sack  */
3307 			break;
3308 		}
3309 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3310 			/* either a RESEND, ACKED, or MARKED */
3311 			/* skip */
3312 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3313 			continue;
3314 		}
3315 		/*
3316 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3317 		 */
3318 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3319 			/*
3320 			 * No new acks were received for data sent to this
3321 			 * dest. Therefore, according to the SFR algo for
3322 			 * CMT, no data sent to this dest can be marked for
3323 			 * FR using this SACK.
3324 			 */
3325 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3326 			continue;
3327 		} else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq,
3328 		    tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
3329 			/*
3330 			 * CMT: New acks were received for data sent to
3331 			 * this dest. But no new acks were seen for data
3332 			 * sent after tp1. Therefore, according to the SFR
3333 			 * algo for CMT, tp1 cannot be marked for FR using
3334 			 * this SACK. This step covers part of the DAC algo
3335 			 * and the HTNA algo as well.
3336 			 */
3337 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3338 			continue;
3339 		}
3340 		/*
3341 		 * Here we check to see if we have already done a FR, and
3342 		 * if so, whether the biggest TSN we saw in the sack is
3343 		 * smaller than the recovery point. If so we don't strike
3344 		 * the tsn... otherwise we CAN strike the TSN.
3345 		 */
3346 		/*
3347 		 * @@@ JRI: Check for CMT if (accum_moved &&
3348 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3349 		 * 0)) {
3350 		 */
3351 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3352 			/*
3353 			 * Strike the TSN if in fast-recovery and cum-ack
3354 			 * moved.
3355 			 */
3356 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3357 				sctp_log_fr(biggest_tsn_newly_acked,
3358 				    tp1->rec.data.TSN_seq,
3359 				    tp1->sent,
3360 				    SCTP_FR_LOG_STRIKE_CHUNK);
3361 			}
3362 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3363 				tp1->sent++;
3364 			}
3365 			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3366 				/*
3367 				 * CMT DAC algorithm: If SACK flag is set to
3368 				 * 0, then lowest_newack test will not pass
3369 				 * because it would have been set to the
3370 				 * cumack earlier. If not already to be
3371 				 * rtx'd, if not a mixed sack, and if tp1 is
3372 				 * not between two sacked TSNs, then mark by
3373 				 * one more. NOTE that we are marking by one
3374 				 * additional time since the SACK DAC flag
3375 				 * indicates that two packets have been
3376 				 * received after this missing TSN.
3377 				 */
3378 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3379 				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3380 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3381 						sctp_log_fr(16 + num_dests_sacked,
3382 						    tp1->rec.data.TSN_seq,
3383 						    tp1->sent,
3384 						    SCTP_FR_LOG_STRIKE_CHUNK);
3385 					}
3386 					tp1->sent++;
3387 				}
3388 			}
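			/*
			 * Worked example (hypothetical values): with
			 * this_sack_lowest_newack == 5000, a tp1 covering
			 * TSN 4990, and exactly one destination reporting
			 * new acks, the DAC rule above strikes TSN 4990 a
			 * second time, since the set DAC flag means two
			 * packets arrived after the missing TSN.
			 */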
3389 		} else if ((tp1->rec.data.doing_fast_retransmit) && (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
3390 			/*
3391 			 * For those that have done a FR we must take
3392 			 * special consideration if we strike. I.e the
3393 			 * biggest_newly_acked must be higher than the
3394 			 * sending_seq at the time we did the FR.
3395 			 */
3396 			if (
3397 #ifdef SCTP_FR_TO_ALTERNATE
3398 			/*
3399 			 * If FR's go to new networks, then we must only do
3400 			 * this for singly homed asoc's. However if the FR's
3401 			 * go to the same network (Armando's work) then it's
3402 			 * ok to FR multiple times.
3403 			 */
3404 			    (asoc->numnets < 2)
3405 #else
3406 			    (1)
3407 #endif
3408 			    ) {
3409 
3410 				if ((compare_with_wrap(biggest_tsn_newly_acked,
3411 				    tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3412 				    (biggest_tsn_newly_acked ==
3413 				    tp1->rec.data.fast_retran_tsn)) {
3414 					/*
3415 					 * Strike the TSN, since this ack is
3416 					 * beyond where things were when we
3417 					 * did a FR.
3418 					 */
3419 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3420 						sctp_log_fr(biggest_tsn_newly_acked,
3421 						    tp1->rec.data.TSN_seq,
3422 						    tp1->sent,
3423 						    SCTP_FR_LOG_STRIKE_CHUNK);
3424 					}
3425 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3426 						tp1->sent++;
3427 					}
3428 					strike_flag = 1;
3429 					if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3430 						/*
3431 						 * CMT DAC algorithm: If
3432 						 * SACK flag is set to 0,
3433 						 * then lowest_newack test
3434 						 * will not pass because it
3435 						 * would have been set to
3436 						 * the cumack earlier. If
3437 						 * not already to be rtx'd,
3438 						 * if not a mixed sack, and
3439 						 * if tp1 is not between two
3440 						 * sacked TSNs, then mark by
3441 						 * one more. NOTE that we
3442 						 * are marking by one
3443 						 * additional time since the
3444 						 * SACK DAC flag indicates
3445 						 * that two packets have
3446 						 * been received after this
3447 						 * missing TSN.
3448 						 */
3449 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3450 						    (num_dests_sacked == 1) &&
3451 						    compare_with_wrap(this_sack_lowest_newack,
3452 						    tp1->rec.data.TSN_seq, MAX_TSN)) {
3453 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3454 								sctp_log_fr(32 + num_dests_sacked,
3455 								    tp1->rec.data.TSN_seq,
3456 								    tp1->sent,
3457 								    SCTP_FR_LOG_STRIKE_CHUNK);
3458 							}
3459 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3460 								tp1->sent++;
3461 							}
3462 						}
3463 					}
3464 				}
3465 			}
3466 			/*
3467 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3468 			 * algo covers HTNA.
3469 			 */
3470 		} else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3471 		    biggest_tsn_newly_acked, MAX_TSN)) {
3472 			/*
3473 			 * We don't strike these: This is the  HTNA
3474 			 * algorithm i.e. we don't strike If our TSN is
3475 			 * larger than the Highest TSN Newly Acked.
3476 			 */
3477 			;
3478 		} else {
3479 			/* Strike the TSN */
3480 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3481 				sctp_log_fr(biggest_tsn_newly_acked,
3482 				    tp1->rec.data.TSN_seq,
3483 				    tp1->sent,
3484 				    SCTP_FR_LOG_STRIKE_CHUNK);
3485 			}
3486 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3487 				tp1->sent++;
3488 			}
3489 			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3490 				/*
3491 				 * CMT DAC algorithm: If SACK flag is set to
3492 				 * 0, then lowest_newack test will not pass
3493 				 * because it would have been set to the
3494 				 * cumack earlier. If not already to be
3495 				 * rtx'd, if not a mixed sack, and if tp1 is
3496 				 * not between two sacked TSNs, then mark by
3497 				 * one more. NOTE that we are marking by one
3498 				 * additional time since the SACK DAC flag
3499 				 * indicates that two packets have been
3500 				 * received after this missing TSN.
3501 				 */
3502 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3503 				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3504 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3505 						sctp_log_fr(48 + num_dests_sacked,
3506 						    tp1->rec.data.TSN_seq,
3507 						    tp1->sent,
3508 						    SCTP_FR_LOG_STRIKE_CHUNK);
3509 					}
3510 					tp1->sent++;
3511 				}
3512 			}
3513 		}
3514 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3515 			struct sctp_nets *alt;
3516 
3517 			/* fix counts and things */
3518 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3519 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3520 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3521 				    tp1->book_size,
3522 				    (uintptr_t) tp1->whoTo,
3523 				    tp1->rec.data.TSN_seq);
3524 			}
3525 			if (tp1->whoTo) {
3526 				tp1->whoTo->net_ack++;
3527 				sctp_flight_size_decrease(tp1);
3528 			}
3529 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3530 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3531 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3532 			}
3533 			/* add back to the rwnd */
3534 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3535 
3536 			/* remove from the total flight */
3537 			sctp_total_flight_decrease(stcb, tp1);
3538 
3539 			if ((stcb->asoc.peer_supports_prsctp) &&
3540 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3541 				/*
3542 				 * Has it been retransmitted tv_sec times? -
3543 				 * we store the retran count there.
3544 				 */
3545 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3546 					/* Yes, so drop it */
3547 					if (tp1->data != NULL) {
3548 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3549 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3550 						    SCTP_SO_NOT_LOCKED);
3551 					}
3552 					/* Make sure to flag we had a FR */
3553 					tp1->whoTo->net_ack++;
3554 					tp1 = TAILQ_NEXT(tp1, sctp_next);
3555 					continue;
3556 				}
3557 			}
3558 			/* printf("OK, we are now ready to FR this guy\n"); */
3559 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3560 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3561 				    0, SCTP_FR_MARKED);
3562 			}
3563 			if (strike_flag) {
3564 				/* This is a subsequent FR */
3565 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3566 			}
3567 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3568 			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
3569 				/*
3570 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3571 				 * If CMT is being used, then pick dest with
3572 				 * largest ssthresh for any retransmission.
3573 				 */
3574 				tp1->no_fr_allowed = 1;
3575 				alt = tp1->whoTo;
3576 				/* sa_ignore NO_NULL_CHK */
3577 				if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
3578 					/*
3579 					 * JRS 5/18/07 - If CMT PF is on,
3580 					 * use the PF version of
3581 					 * find_alt_net()
3582 					 */
3583 					alt = sctp_find_alternate_net(stcb, alt, 2);
3584 				} else {
3585 					/*
3586 					 * JRS 5/18/07 - If only CMT is on,
3587 					 * use the CMT version of
3588 					 * find_alt_net()
3589 					 */
3590 					/* sa_ignore NO_NULL_CHK */
3591 					alt = sctp_find_alternate_net(stcb, alt, 1);
3592 				}
3593 				if (alt == NULL) {
3594 					alt = tp1->whoTo;
3595 				}
3596 				/*
3597 				 * CUCv2: If a different dest is picked for
3598 				 * the retransmission, then new
3599 				 * (rtx-)pseudo_cumack needs to be tracked
3600 				 * for orig dest. Let CUCv2 track new (rtx-)
3601 				 * pseudo-cumack always.
3602 				 */
3603 				if (tp1->whoTo) {
3604 					tp1->whoTo->find_pseudo_cumack = 1;
3605 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3606 				}
3607 			} else {/* CMT is OFF */
3608 
3609 #ifdef SCTP_FR_TO_ALTERNATE
3610 				/* Can we find an alternate? */
3611 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3612 #else
3613 				/*
3614 				 * default behavior is to NOT retransmit
3615 				 * FR's to an alternate. Armando Caro's
3616 				 * paper details why.
3617 				 */
3618 				alt = tp1->whoTo;
3619 #endif
3620 			}
3621 
3622 			tp1->rec.data.doing_fast_retransmit = 1;
3623 			tot_retrans++;
3624 			/* mark the sending seq for possible subsequent FR's */
3625 			/*
3626 			 * printf("Marking TSN for FR new value %x\n",
3627 			 * (uint32_t)tpi->rec.data.TSN_seq);
3628 			 */
3629 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3630 				/*
3631 				 * If the send queue is empty then it's
3632 				 * the next sequence number that will be
3633 				 * assigned, so we subtract one from this to
3634 				 * get the one we last sent.
3635 				 */
3636 				tp1->rec.data.fast_retran_tsn = sending_seq;
3637 			} else {
3638 				/*
3639 				 * If there are chunks on the send queue
3640 				 * (unsent data that has made it from the
3641 				 * stream queues but not out the door), we
3642 				 * take the first one (which will have the
3643 				 * lowest TSN) and subtract one to get the
3644 				 * one we last sent.
3645 				 */
3646 				struct sctp_tmit_chunk *ttt;
3647 
3648 				ttt = TAILQ_FIRST(&asoc->send_queue);
3649 				tp1->rec.data.fast_retran_tsn =
3650 				    ttt->rec.data.TSN_seq;
3651 			}
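			/*
			 * Example (hypothetical TSNs): if the send queue
			 * head is TSN 111 when we FR, fast_retran_tsn
			 * becomes 111; a later SACK may strike this chunk
			 * again only once it newly acks TSN 111 or beyond.
			 */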
3652 
3653 			if (tp1->do_rtt) {
3654 				/*
3655 				 * this guy had an RTO calculation pending on
3656 				 * it, cancel it
3657 				 */
3658 				tp1->do_rtt = 0;
3659 			}
3660 			if (alt != tp1->whoTo) {
3661 				/* yes, there is an alternate. */
3662 				sctp_free_remote_addr(tp1->whoTo);
3663 				/* sa_ignore FREED_MEMORY */
3664 				tp1->whoTo = alt;
3665 				atomic_add_int(&alt->ref_count, 1);
3666 			}
3667 		}
3668 		tp1 = TAILQ_NEXT(tp1, sctp_next);
3669 	}			/* while (tp1) */
3670 
3671 	if (tot_retrans > 0) {
3672 		/*
3673 		 * Setup the ecn nonce re-sync point. We do this since once
3674 		 * we FR something, we introduce a Karn's rule scenario
3675 		 * and won't know the totals for the ECN bits.
3676 		 */
3677 		asoc->nonce_resync_tsn = sending_seq;
3678 		asoc->nonce_wait_for_ecne = 0;
3679 		asoc->nonce_sum_check = 0;
3680 	}
3681 }
3682 
3683 struct sctp_tmit_chunk *
3684 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3685     struct sctp_association *asoc)
3686 {
3687 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3688 	struct timeval now;
3689 	int now_filled = 0;
3690 
3691 	if (asoc->peer_supports_prsctp == 0) {
3692 		return (NULL);
3693 	}
3694 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3695 	while (tp1) {
3696 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3697 		    tp1->sent != SCTP_DATAGRAM_RESEND) {
3698 			/* no chance to advance, out of here */
3699 			break;
3700 		}
3701 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3702 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3703 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3704 				    asoc->advanced_peer_ack_point,
3705 				    tp1->rec.data.TSN_seq, 0, 0);
3706 			}
3707 		}
3708 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3709 			/*
3710 			 * We can't fwd-tsn past any that are reliable aka
3711 			 * retransmitted until the asoc fails.
3712 			 */
3713 			break;
3714 		}
3715 		if (!now_filled) {
3716 			(void)SCTP_GETTIME_TIMEVAL(&now);
3717 			now_filled = 1;
3718 		}
3719 		tp2 = TAILQ_NEXT(tp1, sctp_next);
3720 		/*
3721 		 * now we have a chunk which is marked for another
3722 		 * retransmission to a PR-stream, but has maybe already run
3723 		 * out of chances, OR has been marked to skip now. Can we
3724 		 * skip it if it's a resend?
3725 		 */
3726 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3727 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3728 			/*
3729 			 * Now is this one marked for resend and its time is
3730 			 * now up?
3731 			 */
3732 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3733 				/* Yes so drop it */
3734 				if (tp1->data) {
3735 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3736 					    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3737 					    SCTP_SO_NOT_LOCKED);
3738 				}
3739 			} else {
3740 				/*
3741 				 * No, we are done when we hit one marked for
3742 				 * resend whose time has not expired.
3743 				 */
3744 				break;
3745 			}
3746 		}
3747 		/*
3748 		 * Ok, now if this chunk is marked to drop, we can clean up
3749 		 * the chunk, advance our peer ack point, and check
3750 		 * the next chunk.
3751 		 */
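		/*
		 * Example (hypothetical TSNs): with cum-ack 100 and the
		 * sent queue holding TSNs 101 and 102, both marked
		 * SCTP_FORWARD_TSN_SKIP, the loop advances
		 * advanced_peer_ack_point to 102; a later FWD-TSN chunk can
		 * then tell the peer to act as if it had seen through 102.
		 */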
3752 		if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3753 			/* the advanced PeerAckPoint goes forward */
3754 			if (compare_with_wrap(tp1->rec.data.TSN_seq,
3755 			    asoc->advanced_peer_ack_point,
3756 			    MAX_TSN)) {
3757 
3758 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3759 				a_adv = tp1;
3760 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3761 				/* No update but we do save the chk */
3762 				a_adv = tp1;
3763 			}
3764 		} else {
3765 			/*
3766 			 * If it is still in RESEND we can advance no
3767 			 * further
3768 			 */
3769 			break;
3770 		}
3771 		/*
3772 		 * If we hit here we just dumped tp1, move to next tsn on
3773 		 * sent queue.
3774 		 */
3775 		tp1 = tp2;
3776 	}
3777 	return (a_adv);
3778 }
3779 
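/*
 * Sanity-audit the flight-size bookkeeping: when everything remaining on
 * the sent queue is PR-SCTP-skipped, resent, or acked, nothing should
 * still be counted as in flight.  This is a consistency check only; it
 * reports (or panics under INVARIANTS) and changes no state itself.
 */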
3780 static int
3781 sctp_fs_audit(struct sctp_association *asoc)
3782 {
3783 	struct sctp_tmit_chunk *chk;
3784 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3785 	int entry_flight, entry_cnt, ret;
3786 
3787 	entry_flight = asoc->total_flight;
3788 	entry_cnt = asoc->total_flight_count;
3789 	ret = 0;
3790 
3791 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3792 		return (0);
3793 
3794 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3795 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3796 			printf("Chk TSN:%u size:%d inflight cnt:%d\n",
3797 			    chk->rec.data.TSN_seq,
3798 			    chk->send_size,
3799 			    chk->snd_count
3800 			    );
3801 			inflight++;
3802 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3803 			resend++;
3804 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3805 			inbetween++;
3806 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3807 			above++;
3808 		} else {
3809 			acked++;
3810 		}
3811 	}
3812 
3813 	if ((inflight > 0) || (inbetween > 0)) {
3814 #ifdef INVARIANTS
3815 		panic("Flight size-express incorrect?\n");
3816 #else
3817 		printf("asoc->total_flight:%d cnt:%d\n",
3818 		    entry_flight, entry_cnt);
3819 
3820 		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3821 		    inflight, inbetween, resend, above, acked);
3822 		ret = 1;
3823 #endif
3824 	}
3825 	return (ret);
3826 }
3827 
3828 
3829 static void
3830 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3831     struct sctp_association *asoc,
3832     struct sctp_nets *net,
3833     struct sctp_tmit_chunk *tp1)
3834 {
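	/*
	 * Recover a chunk that was used as a window probe: pull it back
	 * out of the flight accounting and mark it for retransmission,
	 * unless it was already acked or skipped, in which case we leave
	 * it where it is.
	 */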
3835 	tp1->window_probe = 0;
3836 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3837 		/* TSN was skipped; we do NOT move back. */
3838 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3839 		    tp1->whoTo->flight_size,
3840 		    tp1->book_size,
3841 		    (uintptr_t) tp1->whoTo,
3842 		    tp1->rec.data.TSN_seq);
3843 		return;
3844 	}
3845 	/* First setup this by shrinking flight */
3846 	sctp_flight_size_decrease(tp1);
3847 	sctp_total_flight_decrease(stcb, tp1);
3848 	/* Now mark for resend */
3849 	tp1->sent = SCTP_DATAGRAM_RESEND;
3850 	asoc->sent_queue_retran_cnt++;
3851 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3852 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3853 		    tp1->whoTo->flight_size,
3854 		    tp1->book_size,
3855 		    (uintptr_t) tp1->whoTo,
3856 		    tp1->rec.data.TSN_seq);
3857 	}
3858 }
3859 
3860 void
3861 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3862     uint32_t rwnd, int nonce_sum_flag, int *abort_now)
3863 {
3864 	struct sctp_nets *net;
3865 	struct sctp_association *asoc;
3866 	struct sctp_tmit_chunk *tp1, *tp2;
3867 	uint32_t old_rwnd;
3868 	int win_probe_recovery = 0;
3869 	int win_probe_recovered = 0;
3870 	int j, done_once = 0;
3871 
3872 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3873 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3874 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3875 	}
3876 	SCTP_TCB_LOCK_ASSERT(stcb);
3877 #ifdef SCTP_ASOCLOG_OF_TSNS
3878 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3879 	stcb->asoc.cumack_log_at++;
3880 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3881 		stcb->asoc.cumack_log_at = 0;
3882 	}
3883 #endif
3884 	asoc = &stcb->asoc;
3885 	old_rwnd = asoc->peers_rwnd;
3886 	if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
3887 		/* old ack */
3888 		return;
3889 	} else if (asoc->last_acked_seq == cumack) {
3890 		/* Window update sack */
3891 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3892 		    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3893 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3894 			/* SWS sender side engages */
3895 			asoc->peers_rwnd = 0;
3896 		}
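		/*
		 * Worked example (hypothetical values): with rwnd == 65536,
		 * total_flight == 4000, 10 chunks on the sent queue, and a
		 * per-chunk overhead of 256 bytes, the usable peer window
		 * becomes 65536 - (4000 + 10 * 256) == 58976.
		 */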
3897 		if (asoc->peers_rwnd > old_rwnd) {
3898 			goto again;
3899 		}
3900 		return;
3901 	}
3902 	/* First setup for CC stuff */
3903 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3904 		net->prev_cwnd = net->cwnd;
3905 		net->net_ack = 0;
3906 		net->net_ack2 = 0;
3907 
3908 		/*
3909 		 * CMT: Reset CUC and Fast recovery algo variables before
3910 		 * SACK processing
3911 		 */
3912 		net->new_pseudo_cumack = 0;
3913 		net->will_exit_fast_recovery = 0;
3914 	}
3915 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3916 		uint32_t send_s;
3917 
3918 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3919 			tp1 = TAILQ_LAST(&asoc->sent_queue,
3920 			    sctpchunk_listhead);
3921 			send_s = tp1->rec.data.TSN_seq + 1;
3922 		} else {
3923 			send_s = asoc->sending_seq;
3924 		}
3925 		if ((cumack == send_s) ||
3926 		    compare_with_wrap(cumack, send_s, MAX_TSN)) {
3927 #ifndef INVARIANTS
3928 			struct mbuf *oper;
3929 
3930 #endif
3931 #ifdef INVARIANTS
3932 			panic("Impossible sack 1");
3933 #else
3934 
3935 			*abort_now = 1;
3936 			/* XXX */
3937 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3938 			    0, M_DONTWAIT, 1, MT_DATA);
3939 			if (oper) {
3940 				struct sctp_paramhdr *ph;
3941 				uint32_t *ippp;
3942 
3943 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3944 				    sizeof(uint32_t);
3945 				ph = mtod(oper, struct sctp_paramhdr *);
3946 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3947 				ph->param_length = htons(SCTP_BUF_LEN(oper));
3948 				ippp = (uint32_t *) (ph + 1);
3949 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3950 			}
3951 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3952 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
3953 			return;
3954 #endif
3955 		}
3956 	}
3957 	asoc->this_sack_highest_gap = cumack;
3958 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3959 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3960 		    stcb->asoc.overall_error_count,
3961 		    0,
3962 		    SCTP_FROM_SCTP_INDATA,
3963 		    __LINE__);
3964 	}
3965 	stcb->asoc.overall_error_count = 0;
3966 	if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
3967 		/* process the new consecutive TSN first */
3968 		tp1 = TAILQ_FIRST(&asoc->sent_queue);
3969 		while (tp1) {
3970 			tp2 = TAILQ_NEXT(tp1, sctp_next);
3971 			if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
3972 			    MAX_TSN) ||
3973 			    cumack == tp1->rec.data.TSN_seq) {
3974 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3975 					printf("Warning, an unsent is now acked?\n");
3976 				}
3977 				/*
3978 				 * ECN Nonce: Add the nonce to the sender's
3979 				 * nonce sum
3980 				 */
3981 				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
3982 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3983 					/*
3984 					 * If it is less than ACKED, it is
3985 					 * now no longer in flight. Higher
3986 					 * values may occur during marking
3987 					 */
3988 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3989 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3990 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3991 							    tp1->whoTo->flight_size,
3992 							    tp1->book_size,
3993 							    (uintptr_t) tp1->whoTo,
3994 							    tp1->rec.data.TSN_seq);
3995 						}
3996 						sctp_flight_size_decrease(tp1);
3997 						/* sa_ignore NO_NULL_CHK */
3998 						sctp_total_flight_decrease(stcb, tp1);
3999 					}
4000 					tp1->whoTo->net_ack += tp1->send_size;
4001 					if (tp1->snd_count < 2) {
4002 						/*
4003 						 * True non-retransmitted
4004 						 * chunk
4005 						 */
4006 						tp1->whoTo->net_ack2 +=
4007 						    tp1->send_size;
4008 
4009 						/* update RTO too? */
4010 						if (tp1->do_rtt) {
4011 							tp1->whoTo->RTO =
4012 							/*
4013 							 * sa_ignore
4014 							 * NO_NULL_CHK
4015 							 */
4016 							    sctp_calculate_rto(stcb,
4017 							    asoc, tp1->whoTo,
4018 							    &tp1->sent_rcv_time,
4019 							    sctp_align_safe_nocopy);
4020 							tp1->do_rtt = 0;
4021 						}
4022 					}
4023 					/*
4024 					 * CMT: CUCv2 algorithm. From the
4025 					 * cumack'd TSNs, for each TSN being
4026 					 * acked for the first time, set the
4027 					 * following variables for the
4028 					 * corresp destination.
4029 					 * new_pseudo_cumack will trigger a
4030 					 * cwnd update.
4031 					 * find_(rtx_)pseudo_cumack will
4032 					 * trigger search for the next
4033 					 * expected (rtx-)pseudo-cumack.
4034 					 */
4035 					tp1->whoTo->new_pseudo_cumack = 1;
4036 					tp1->whoTo->find_pseudo_cumack = 1;
4037 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4038 
4039 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4040 						/* sa_ignore NO_NULL_CHK */
4041 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4042 					}
4043 				}
4044 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4045 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4046 				}
4047 				if (tp1->rec.data.chunk_was_revoked) {
4048 					/* deflate the cwnd */
4049 					tp1->whoTo->cwnd -= tp1->book_size;
4050 					tp1->rec.data.chunk_was_revoked = 0;
4051 				}
4052 				tp1->sent = SCTP_DATAGRAM_ACKED;
4053 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4054 				if (tp1->data) {
4055 					/* sa_ignore NO_NULL_CHK */
4056 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4057 					sctp_m_freem(tp1->data);
4058 				}
4059 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4060 					sctp_log_sack(asoc->last_acked_seq,
4061 					    cumack,
4062 					    tp1->rec.data.TSN_seq,
4063 					    0,
4064 					    0,
4065 					    SCTP_LOG_FREE_SENT);
4066 				}
4067 				tp1->data = NULL;
4068 				asoc->sent_queue_cnt--;
4069 				sctp_free_a_chunk(stcb, tp1);
4070 				tp1 = tp2;
4071 			} else {
4072 				break;
4073 			}
4074 		}
4075 
4076 	}
4077 	/* sa_ignore NO_NULL_CHK */
4078 	if (stcb->sctp_socket) {
4079 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4080 		struct socket *so;
4081 
4082 #endif
4083 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4084 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4085 			/* sa_ignore NO_NULL_CHK */
4086 			sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
4087 		}
4088 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4089 		so = SCTP_INP_SO(stcb->sctp_ep);
4090 		atomic_add_int(&stcb->asoc.refcnt, 1);
4091 		SCTP_TCB_UNLOCK(stcb);
4092 		SCTP_SOCKET_LOCK(so, 1);
4093 		SCTP_TCB_LOCK(stcb);
4094 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4095 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4096 			/* assoc was freed while we were unlocked */
4097 			SCTP_SOCKET_UNLOCK(so, 1);
4098 			return;
4099 		}
4100 #endif
4101 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4102 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4103 		SCTP_SOCKET_UNLOCK(so, 1);
4104 #endif
4105 	} else {
4106 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4107 			sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
4108 		}
4109 	}
4110 
4111 	/* JRS - Use the congestion control given in the CC module */
4112 	if (asoc->last_acked_seq != cumack)
4113 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4114 
4115 	asoc->last_acked_seq = cumack;
4116 
4117 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4118 		/* nothing left in-flight */
4119 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4120 			net->flight_size = 0;
4121 			net->partial_bytes_acked = 0;
4122 		}
4123 		asoc->total_flight = 0;
4124 		asoc->total_flight_count = 0;
4125 	}
4126 	/* ECN Nonce updates */
4127 	if (asoc->ecn_nonce_allowed) {
4128 		if (asoc->nonce_sum_check) {
4129 			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
4130 				if (asoc->nonce_wait_for_ecne == 0) {
4131 					struct sctp_tmit_chunk *lchk;
4132 
4133 					lchk = TAILQ_FIRST(&asoc->send_queue);
4134 					asoc->nonce_wait_for_ecne = 1;
4135 					if (lchk) {
4136 						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4137 					} else {
4138 						asoc->nonce_wait_tsn = asoc->sending_seq;
4139 					}
4140 				} else {
4141 					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4142 					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4143 						/*
4144 						 * Misbehaving peer. We need
4145 						 * to react to this guy
4146 						 */
4147 						asoc->ecn_allowed = 0;
4148 						asoc->ecn_nonce_allowed = 0;
4149 					}
4150 				}
4151 			}
4152 		} else {
4153 			/* See if Resynchronization Possible */
4154 			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4155 				asoc->nonce_sum_check = 1;
4156 				/*
4157 				 * Now we must calculate what the base is.
4158 				 * We do this based on two things: we know
4159 				 * the totals for all the segments
4160 				 * gap-acked in the SACK (none). We also
4161 				 * know the SACK's nonce sum; it's in
4162 				 * nonce_sum_flag. So we can build a truth
4163 				 * table to back-calculate the new value of
4164 				 * asoc->nonce_sum_expect_base:
4165 				 *
4166 				 * SACK-flag  Seg-Sums -> Base:  0 0 -> 0,
4167 				 * 1 0 -> 1,  0 1 -> 1,  1 1 -> 0
4168 				 * (i.e. Base = SACK-flag XOR Seg-Sums)
4169 				 */
4170 				asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
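				/*
				 * e.g. with no gap-acked segments the
				 * seg-sum term is 0, so the new base is just
				 * the SACK's nonce-sum flag; in general the
				 * back-calculation would be
				 * (seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM.
				 */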
4171 			}
4172 		}
4173 	}
4174 	/* RWND update */
4175 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4176 	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4177 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4178 		/* SWS sender side engages */
4179 		asoc->peers_rwnd = 0;
4180 	}
4181 	if (asoc->peers_rwnd > old_rwnd) {
4182 		win_probe_recovery = 1;
4183 	}
4184 	/* Now assure a timer where data is queued at */
4185 again:
4186 	j = 0;
4187 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4188 		int to_ticks;
4189 
4190 		if (win_probe_recovery && (net->window_probe)) {
4191 			win_probe_recovered = 1;
4192 			/*
4193 			 * Find first chunk that was used with window probe
4194 			 * and clear its window probe
4195 			 */
4196 			/* sa_ignore FREED_MEMORY */
4197 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4198 				if (tp1->window_probe) {
4199 					/* move back to data send queue */
4200 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
4201 					break;
4202 				}
4203 			}
4204 		}
4205 		if (net->RTO == 0) {
4206 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4207 		} else {
4208 			to_ticks = MSEC_TO_TICKS(net->RTO);
4209 		}
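		/*
		 * Illustrative values: with net->RTO == 300 (ms) and
		 * hz == 1000, MSEC_TO_TICKS() yields 300 ticks for the
		 * T3-rxt restart below; a fresh path with RTO == 0 falls
		 * back to the association's initial RTO instead.
		 */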
4210 		if (net->flight_size) {
4211 			j++;
4212 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4213 			    sctp_timeout_handler, &net->rxt_timer);
4214 			if (net->window_probe) {
4215 				net->window_probe = 0;
4216 			}
4217 		} else {
4218 			if (net->window_probe) {
4219 				/*
4220 				 * In window probes we must assure a timer
4221 				 * is still running there
4222 				 */
4223 				net->window_probe = 0;
4224 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4225 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4226 					    sctp_timeout_handler, &net->rxt_timer);
4227 				}
4228 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4229 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4230 				    stcb, net,
4231 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4232 			}
4233 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4234 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4235 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4236 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4237 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4238 				}
4239 			}
4240 		}
4241 	}
4242 	if ((j == 0) &&
4243 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4244 	    (asoc->sent_queue_retran_cnt == 0) &&
4245 	    (win_probe_recovered == 0) &&
4246 	    (done_once == 0)) {
4247 		/*
4248 		 * huh, this should not happen unless all packets are
4249 		 * PR-SCTP and marked to skip of course.
4250 		 */
4251 		if (sctp_fs_audit(asoc)) {
4252 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4253 				net->flight_size = 0;
4254 			}
4255 			asoc->total_flight = 0;
4256 			asoc->total_flight_count = 0;
4257 			asoc->sent_queue_retran_cnt = 0;
4258 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4259 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4260 					sctp_flight_size_increase(tp1);
4261 					sctp_total_flight_increase(stcb, tp1);
4262 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4263 					asoc->sent_queue_retran_cnt++;
4264 				}
4265 			}
4266 		}
4267 		done_once = 1;
4268 		goto again;
4269 	}
4270 	/**********************************/
4271 	/* Now what about shutdown issues */
4272 	/**********************************/
4273 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4274 		/* nothing left on sendqueue.. consider done */
4275 		/* clean up */
4276 		if ((asoc->stream_queue_cnt == 1) &&
4277 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4278 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4279 		    (asoc->locked_on_sending)
4280 		    ) {
4281 			struct sctp_stream_queue_pending *sp;
4282 
4283 			/*
4284 			 * I may be in a state where we got it all across.. but
4285 			 * cannot write more due to a shutdown... we abort
4286 			 * since the user did not indicate EOR in this case.
4287 			 * The sp will be cleaned during free of the asoc.
4288 			 */
4289 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4290 			    sctp_streamhead);
4291 			if ((sp) && (sp->length == 0)) {
4292 				/* Let cleanup code purge it */
4293 				if (sp->msg_is_complete) {
4294 					asoc->stream_queue_cnt--;
4295 				} else {
4296 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4297 					asoc->locked_on_sending = NULL;
4298 					asoc->stream_queue_cnt--;
4299 				}
4300 			}
4301 		}
4302 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4303 		    (asoc->stream_queue_cnt == 0)) {
4304 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4305 				/* Need to abort here */
4306 				struct mbuf *oper;
4307 
4308 		abort_out_now:
4309 				*abort_now = 1;
4310 				/* XXX */
4311 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4312 				    0, M_DONTWAIT, 1, MT_DATA);
4313 				if (oper) {
4314 					struct sctp_paramhdr *ph;
4315 					uint32_t *ippp;
4316 
4317 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4318 					    sizeof(uint32_t);
4319 					ph = mtod(oper, struct sctp_paramhdr *);
4320 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4321 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4322 					ippp = (uint32_t *) (ph + 1);
4323 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4324 				}
4325 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4326 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4327 			} else {
4328 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4329 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4330 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4331 				}
4332 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4333 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4334 				sctp_stop_timers_for_shutdown(stcb);
4335 				sctp_send_shutdown(stcb,
4336 				    stcb->asoc.primary_destination);
4337 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4338 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4339 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4340 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4341 			}
4342 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4343 		    (asoc->stream_queue_cnt == 0)) {
4344 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4345 				goto abort_out_now;
4346 			}
4347 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4348 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4349 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4350 			sctp_send_shutdown_ack(stcb,
4351 			    stcb->asoc.primary_destination);
4352 
4353 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4354 			    stcb->sctp_ep, stcb, asoc->primary_destination);
4355 		}
4356 	}
4357 	/*********************************************/
4358 	/* Here we perform PR-SCTP procedures        */
4359 	/* (section 4.2)                             */
4360 	/*********************************************/
4361 	/* C1. update advancedPeerAckPoint */
4362 	if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4363 		asoc->advanced_peer_ack_point = cumack;
4364 	}
4365 	/* PR-Sctp issues need to be addressed too */
4366 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4367 		struct sctp_tmit_chunk *lchk;
4368 		uint32_t old_adv_peer_ack_point;
4369 
4370 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4371 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4372 		/* C3. See if we need to send a Fwd-TSN */
4373 		if (compare_with_wrap(asoc->advanced_peer_ack_point, cumack,
4374 		    MAX_TSN)) {
4375 			/*
4376 			 * ISSUE with ECN, see FWD-TSN processing for notes
4377 			 * on issues that will occur when the ECN NONCE
4378 			 * stuff is put into SCTP for cross checking.
4379 			 */
4380 			if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
4381 			    MAX_TSN)) {
4382 				send_forward_tsn(stcb, asoc);
4383 				/*
4384 				 * ECN Nonce: Disable Nonce Sum check when
4385 				 * FWD TSN is sent and store resync tsn
4386 				 */
4387 				asoc->nonce_sum_check = 0;
4388 				asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
4389 			} else if (lchk) {
4390 				/* try to FR fwd-tsn's that get lost too */
4391 				lchk->rec.data.fwd_tsn_cnt++;
4392 				if (lchk->rec.data.fwd_tsn_cnt > 3) {
4393 					send_forward_tsn(stcb, asoc);
4394 					lchk->rec.data.fwd_tsn_cnt = 0;
4395 				}
4396 			}
4397 		}
4398 		if (lchk) {
4399 			/* Assure a timer is up */
4400 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4401 			    stcb->sctp_ep, stcb, lchk->whoTo);
4402 		}
4403 	}
4404 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4405 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4406 		    rwnd,
4407 		    stcb->asoc.peers_rwnd,
4408 		    stcb->asoc.total_flight,
4409 		    stcb->asoc.total_output_queue_size);
4410 	}
4411 }
4412 
4413 void
4414 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4415     struct sctp_tcb *stcb, struct sctp_nets *net_from,
4416     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4417     int *abort_now, uint8_t flags,
4418     uint32_t cum_ack, uint32_t rwnd)
4419 {
4420 	struct sctp_association *asoc;
4421 	struct sctp_tmit_chunk *tp1, *tp2;
4422 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4423 	uint32_t sav_cum_ack;
4424 	uint16_t wake_him = 0;
4425 	uint32_t send_s = 0;
4426 	long j;
4427 	int accum_moved = 0;
4428 	int will_exit_fast_recovery = 0;
4429 	uint32_t a_rwnd, old_rwnd;
4430 	int win_probe_recovery = 0;
4431 	int win_probe_recovered = 0;
4432 	struct sctp_nets *net = NULL;
4433 	int nonce_sum_flag, ecn_seg_sums = 0;
4434 	int done_once;
4435 	uint8_t reneged_all = 0;
4436 	uint8_t cmt_dac_flag;
4437 
4438 	/*
4439 	 * we take any chance we can to service our queues since we cannot
4440 	 * get awoken when the socket is read from :<
4441 	 */
4442 	/*
4443 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4444 	 * old sack, if so discard. 2) If there is nothing left in the send
4445 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4446 	 * too, update any rwnd change and verify no timers are running.
4447 	 * Then return. 3) Process any new consecutive data i.e. cum-ack
4448 	 * moved; process these first and note that it moved. 4) Process any
4449 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4450 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4451 	 * sync up flightsizes and things, stop all timers and also check
4452 	 * for shutdown_pending state. If so then go ahead and send off the
4453 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4454 	 * start that timer, return. 9) Strike any non-acked things and do FR
4455 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4456 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4457 	 * if in shutdown_recv state.
4458 	 */
4459 	SCTP_TCB_LOCK_ASSERT(stcb);
4460 	/* CMT DAC algo */
4461 	this_sack_lowest_newack = 0;
4462 	j = 0;
4463 	SCTP_STAT_INCR(sctps_slowpath_sack);
4464 	last_tsn = cum_ack;
4465 	nonce_sum_flag = flags & SCTP_SACK_NONCE_SUM;
4466 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4467 #ifdef SCTP_ASOCLOG_OF_TSNS
4468 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4469 	stcb->asoc.cumack_log_at++;
4470 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4471 		stcb->asoc.cumack_log_at = 0;
4472 	}
4473 #endif
4474 	a_rwnd = rwnd;
4475 
4476 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4477 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4478 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4479 	}
4480 	old_rwnd = stcb->asoc.peers_rwnd;
4481 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4482 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4483 		    stcb->asoc.overall_error_count,
4484 		    0,
4485 		    SCTP_FROM_SCTP_INDATA,
4486 		    __LINE__);
4487 	}
4488 	stcb->asoc.overall_error_count = 0;
4489 	asoc = &stcb->asoc;
4490 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4491 		sctp_log_sack(asoc->last_acked_seq,
4492 		    cum_ack,
4493 		    0,
4494 		    num_seg,
4495 		    num_dup,
4496 		    SCTP_LOG_NEW_SACK);
4497 	}
4498 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
4499 		uint16_t i;
4500 		uint32_t *dupdata, dblock;
4501 
4502 		for (i = 0; i < num_dup; i++) {
4503 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4504 			    sizeof(uint32_t), (uint8_t *) & dblock);
4505 			if (dupdata == NULL) {
4506 				break;
4507 			}
4508 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4509 		}
4510 	}
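	/*
	 * Each reported duplicate TSN is a 32-bit value appended after the
	 * gap-ack blocks in the SACK; above we only walk them for logging,
	 * since duplicates carry no ack state of their own.
	 */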
4511 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4512 		/* reality check */
4513 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4514 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4515 			    sctpchunk_listhead);
4516 			send_s = tp1->rec.data.TSN_seq + 1;
4517 		} else {
4518 			tp1 = NULL;
4519 			send_s = asoc->sending_seq;
4520 		}
4521 		if (cum_ack == send_s ||
4522 		    compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
4523 			struct mbuf *oper;
4524 
4525 			/*
4526 			 * no way, we have not even sent this TSN out yet.
4527 			 * Peer is hopelessly messed up with us.
4528 			 */
4529 			printf("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4530 			    cum_ack, send_s);
4531 			if (tp1) {
4532 				printf("Got send_s from tsn:%x + 1 of tp1:%p\n",
4533 				    tp1->rec.data.TSN_seq, tp1);
4534 			}
4535 	hopeless_peer:
4536 			*abort_now = 1;
4537 			/* XXX */
4538 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4539 			    0, M_DONTWAIT, 1, MT_DATA);
4540 			if (oper) {
4541 				struct sctp_paramhdr *ph;
4542 				uint32_t *ippp;
4543 
4544 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4545 				    sizeof(uint32_t);
4546 				ph = mtod(oper, struct sctp_paramhdr *);
4547 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4548 				ph->param_length = htons(SCTP_BUF_LEN(oper));
4549 				ippp = (uint32_t *) (ph + 1);
4550 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4551 			}
4552 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4553 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4554 			return;
4555 		}
4556 	}
4557 	/**********************/
4558 	/* 1) check the range */
4559 	/**********************/
4560 	if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
4561 		/* acking something behind */
4562 		return;
4563 	}
4564 	sav_cum_ack = asoc->last_acked_seq;
4565 
4566 	/* update the Rwnd of the peer */
4567 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4568 	    TAILQ_EMPTY(&asoc->send_queue) &&
4569 	    (asoc->stream_queue_cnt == 0)) {
4570 		/* nothing left on send/sent and strmq */
4571 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4572 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4573 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4574 		}
4575 		asoc->peers_rwnd = a_rwnd;
4576 		if (asoc->sent_queue_retran_cnt) {
4577 			asoc->sent_queue_retran_cnt = 0;
4578 		}
4579 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4580 			/* SWS sender side engages */
4581 			asoc->peers_rwnd = 0;
4582 		}
4583 		/* stop any timers */
4584 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4585 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4586 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4587 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4588 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4589 					SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
4590 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4591 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4592 				}
4593 			}
4594 			net->partial_bytes_acked = 0;
4595 			net->flight_size = 0;
4596 		}
4597 		asoc->total_flight = 0;
4598 		asoc->total_flight_count = 0;
4599 		return;
4600 	}
4601 	/*
4602 	 * We init net_ack and net_ack2 to 0. These are used to track 2
4603 	 * things. The total byte count acked is tracked in net_ack AND
4604 	 * net_ack2 is used to track the total bytes acked that are
4605 	 * unambiguous and were never retransmitted. We track these on a
4606 	 * per destination address basis.
4607 	 */
4608 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4609 		net->prev_cwnd = net->cwnd;
4610 		net->net_ack = 0;
4611 		net->net_ack2 = 0;
4612 
4613 		/*
4614 		 * CMT: Reset CUC and Fast recovery algo variables before
4615 		 * SACK processing
4616 		 */
4617 		net->new_pseudo_cumack = 0;
4618 		net->will_exit_fast_recovery = 0;
4619 	}
4620 	/* process the new consecutive TSN first */
4621 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
4622 	while (tp1) {
4623 		if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
4624 		    MAX_TSN) ||
4625 		    last_tsn == tp1->rec.data.TSN_seq) {
4626 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4627 				/*
4628 				 * ECN Nonce: Add the nonce to the sender's
4629 				 * nonce sum
4630 				 */
4631 				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4632 				accum_moved = 1;
4633 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4634 					/*
4635 					 * If it is less than ACKED, it is
4636 					 * now no longer in flight. Higher
4637 					 * values may occur during marking
4638 					 */
4639 					if ((tp1->whoTo->dest_state &
4640 					    SCTP_ADDR_UNCONFIRMED) &&
4641 					    (tp1->snd_count < 2)) {
4642 						/*
4643 						 * If there was no retran
4644 						 * and the address is
4645 						 * un-confirmed and we sent
4646 						 * there and are now
4647 						 * sacked.. it's confirmed,
4648 						 * mark it so.
4649 						 */
4650 						tp1->whoTo->dest_state &=
4651 						    ~SCTP_ADDR_UNCONFIRMED;
4652 					}
4653 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4654 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4655 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4656 							    tp1->whoTo->flight_size,
4657 							    tp1->book_size,
4658 							    (uintptr_t) tp1->whoTo,
4659 							    tp1->rec.data.TSN_seq);
4660 						}
4661 						sctp_flight_size_decrease(tp1);
4662 						sctp_total_flight_decrease(stcb, tp1);
4663 					}
4664 					tp1->whoTo->net_ack += tp1->send_size;
4665 
4666 					/* CMT SFR and DAC algos */
4667 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4668 					tp1->whoTo->saw_newack = 1;
4669 
4670 					if (tp1->snd_count < 2) {
4671 						/*
4672 						 * True non-retransmitted
4673 						 * chunk
4674 						 */
4675 						tp1->whoTo->net_ack2 +=
4676 						    tp1->send_size;
4677 
4678 						/* update RTO too? */
4679 						if (tp1->do_rtt) {
4680 							tp1->whoTo->RTO =
4681 							    sctp_calculate_rto(stcb,
4682 							    asoc, tp1->whoTo,
4683 							    &tp1->sent_rcv_time,
4684 							    sctp_align_safe_nocopy);
4685 							tp1->do_rtt = 0;
4686 						}
4687 					}
4688 					/*
4689 					 * CMT: CUCv2 algorithm. From the
4690 					 * cumack'd TSNs, for each TSN being
4691 					 * acked for the first time, set the
4692 					 * following variables for the
4693 					 * corresp destination.
4694 					 * new_pseudo_cumack will trigger a
4695 					 * cwnd update.
4696 					 * find_(rtx_)pseudo_cumack will
4697 					 * trigger search for the next
4698 					 * expected (rtx-)pseudo-cumack.
4699 					 */
4700 					tp1->whoTo->new_pseudo_cumack = 1;
4701 					tp1->whoTo->find_pseudo_cumack = 1;
4702 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4703 
4704 
4705 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4706 						sctp_log_sack(asoc->last_acked_seq,
4707 						    cum_ack,
4708 						    tp1->rec.data.TSN_seq,
4709 						    0,
4710 						    0,
4711 						    SCTP_LOG_TSN_ACKED);
4712 					}
4713 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4714 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4715 					}
4716 				}
4717 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4718 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4719 #ifdef SCTP_AUDITING_ENABLED
4720 					sctp_audit_log(0xB3,
4721 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4722 #endif
4723 				}
4724 				if (tp1->rec.data.chunk_was_revoked) {
4725 					/* deflate the cwnd */
4726 					tp1->whoTo->cwnd -= tp1->book_size;
4727 					tp1->rec.data.chunk_was_revoked = 0;
4728 				}
4729 				tp1->sent = SCTP_DATAGRAM_ACKED;
4730 			}
4731 		} else {
4732 			break;
4733 		}
4734 		tp1 = TAILQ_NEXT(tp1, sctp_next);
4735 	}
4736 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4737 	/* always set this up to cum-ack */
4738 	asoc->this_sack_highest_gap = last_tsn;
4739 
4740 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4741 
4742 		/*
4743 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4744 		 * to be greater than the cumack. Also reset saw_newack to 0
4745 		 * for all dests.
4746 		 */
4747 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4748 			net->saw_newack = 0;
4749 			net->this_sack_highest_newack = last_tsn;
4750 		}
4751 
4752 		/*
4753 		 * this_sack_highest_gap will increase while handling NEW
4754 		 * segments; this_sack_highest_newack will increase while
4755 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4756 		 * used for CMT DAC algo. saw_newack will also change.
4757 		 */
4758 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4759 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4760 		    num_seg, num_nr_seg, &ecn_seg_sums)) {
4761 			wake_him++;
4762 		}
4763 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4764 			/*
4765 			 * validate the biggest_tsn_acked in the gap acks if
4766 			 * strict adherence is wanted.
4767 			 */
4768 			if ((biggest_tsn_acked == send_s) ||
4769 			    (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
4770 				/*
4771 				 * peer is either confused or we are under
4772 				 * attack. We must abort.
4773 				 */
4774 				printf("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4775 				    biggest_tsn_acked,
4776 				    send_s);
4777 
4778 				goto hopeless_peer;
4779 			}
4780 		}
4781 	}
4782 	/*******************************************/
4783 	/* cancel ALL T3-send timer if accum moved */
4784 	/*******************************************/
4785 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
4786 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4787 			if (net->new_pseudo_cumack)
4788 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4789 				    stcb, net,
4790 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4791 
4792 		}
4793 	} else {
4794 		if (accum_moved) {
4795 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4796 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4797 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4798 			}
4799 		}
4800 	}
4801 	/********************************************/
4802 	/* drop the acked chunks from the sendqueue */
4803 	/********************************************/
4804 	asoc->last_acked_seq = cum_ack;
4805 
4806 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
4807 	if (tp1 == NULL)
4808 		goto done_with_it;
4809 	do {
4810 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
4811 		    MAX_TSN)) {
4812 			break;
4813 		}
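		/*
		 * compare_with_wrap() is serial-number arithmetic. A minimal
		 * sketch of the idea (not necessarily the exact
		 * implementation):
		 *
		 *	(a > b && (a - b) < (MAX_TSN >> 1)) ||
		 *	(a < b && (b - a) > (MAX_TSN >> 1))
		 *
		 * so with 32-bit wrap, TSN 0x00000002 counts as "greater
		 * than" 0xfffffffe and chunks straddling the wrap point are
		 * still released correctly.
		 */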
4814 		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4815 			/* no more sent on list */
4816 			printf("Warning, tp1->sent == %d and it's now acked?\n",
4817 			    tp1->sent);
4818 		}
4819 		tp2 = TAILQ_NEXT(tp1, sctp_next);
4820 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4821 		if (tp1->pr_sctp_on) {
4822 			if (asoc->pr_sctp_cnt != 0)
4823 				asoc->pr_sctp_cnt--;
4824 		}
4825 		if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
4826 		    (asoc->total_flight > 0)) {
4827 #ifdef INVARIANTS
4828 			panic("Warning flight size is positive and should be 0");
4829 #else
4830 			SCTP_PRINTF("Warning: flight size should be 0 but is %d\n",
4831 			    asoc->total_flight);
4832 #endif
4833 			asoc->total_flight = 0;
4834 		}
4835 		if (tp1->data) {
4836 			/* sa_ignore NO_NULL_CHK */
4837 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4838 			sctp_m_freem(tp1->data);
4839 			if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4840 				asoc->sent_queue_cnt_removeable--;
4841 			}
4842 		}
4843 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4844 			sctp_log_sack(asoc->last_acked_seq,
4845 			    cum_ack,
4846 			    tp1->rec.data.TSN_seq,
4847 			    0,
4848 			    0,
4849 			    SCTP_LOG_FREE_SENT);
4850 		}
4851 		tp1->data = NULL;
4852 		asoc->sent_queue_cnt--;
4853 		sctp_free_a_chunk(stcb, tp1);
4854 		wake_him++;
4855 		tp1 = tp2;
4856 	} while (tp1 != NULL);
4857 
4858 done_with_it:
4859 	/* sa_ignore NO_NULL_CHK */
4860 	if ((wake_him) && (stcb->sctp_socket)) {
4861 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4862 		struct socket *so;
4863 
4864 #endif
4865 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4866 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4867 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
4868 		}
4869 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4870 		so = SCTP_INP_SO(stcb->sctp_ep);
4871 		atomic_add_int(&stcb->asoc.refcnt, 1);
4872 		SCTP_TCB_UNLOCK(stcb);
4873 		SCTP_SOCKET_LOCK(so, 1);
4874 		SCTP_TCB_LOCK(stcb);
4875 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4876 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4877 			/* assoc was freed while we were unlocked */
4878 			SCTP_SOCKET_UNLOCK(so, 1);
4879 			return;
4880 		}
4881 #endif
4882 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4883 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4884 		SCTP_SOCKET_UNLOCK(so, 1);
4885 #endif
4886 	} else {
4887 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4888 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
4889 		}
4890 	}
4891 
4892 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4893 		if (compare_with_wrap(asoc->last_acked_seq,
4894 		    asoc->fast_recovery_tsn, MAX_TSN) ||
4895 		    asoc->last_acked_seq == asoc->fast_recovery_tsn) {
4896 			/* Setup so we will exit RFC2582 fast recovery */
4897 			will_exit_fast_recovery = 1;
4898 		}
4899 	}
4900 	/*
4901 	 * Check for revoked fragments:
4902 	 *
4903 	 * If the previous SACK had no frags, then nothing can have been
4904 	 * revoked. If the previous SACK had frags, then: if we now have
4905 	 * frags too (num_seg > 0), call sctp_check_for_revoked() to tell
4906 	 * whether the peer revoked some of them; else the peer revoked all
4907 	 * ACKED fragments, since we had some before and now we have NONE.
4908 	 */
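	/*
	 * (Here "revoked" means a TSN that an earlier SACK gap-acked but
	 * the current SACK no longer reports: the peer has taken back the
	 * ack, so the chunk must be treated as in flight again.)
	 */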
4909 
4910 	if (num_seg)
4911 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4912 	else if (asoc->saw_sack_with_frags) {
4913 		int cnt_revoked = 0;
4914 
4915 		tp1 = TAILQ_FIRST(&asoc->sent_queue);
4916 		if (tp1 != NULL) {
4917 			/* Peer revoked all datagrams marked or acked */
4918 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4919 				/*
4920 				 * EY - maybe check only if it is nr_acked;
4921 				 * nr_marked may not be possible
4922 				 */
4923 				if ((tp1->sent == SCTP_DATAGRAM_NR_ACKED) ||
4924 				    (tp1->sent == SCTP_DATAGRAM_NR_MARKED)) {
4925 					continue;
4926 				}
4927 				if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4928 					tp1->sent = SCTP_DATAGRAM_SENT;
4929 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4930 						sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4931 						    tp1->whoTo->flight_size,
4932 						    tp1->book_size,
4933 						    (uintptr_t) tp1->whoTo,
4934 						    tp1->rec.data.TSN_seq);
4935 					}
4936 					sctp_flight_size_increase(tp1);
4937 					sctp_total_flight_increase(stcb, tp1);
4938 					tp1->rec.data.chunk_was_revoked = 1;
4939 					/*
4940 					 * To ensure that this increase in
4941 					 * flightsize, which is artificial,
4942 					 * does not throttle the sender, we
4943 					 * also increase the cwnd
4944 					 * artificially.
4945 					 */
4946 					tp1->whoTo->cwnd += tp1->book_size;
4947 					cnt_revoked++;
4948 				}
4949 			}
4950 			if (cnt_revoked) {
4951 				reneged_all = 1;
4952 			}
4953 		}
4954 		asoc->saw_sack_with_frags = 0;
4955 	}
4956 	if (num_seg || num_nr_seg)
4957 		asoc->saw_sack_with_frags = 1;
4958 	else
4959 		asoc->saw_sack_with_frags = 0;
4960 
4961 	/* JRS - Use the congestion control given in the CC module */
4962 	asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4963 
4964 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4965 		/* nothing left in-flight */
4966 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4967 			/* stop all timers */
4968 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4969 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4970 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4971 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4972 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4973 				}
4974 			}
4975 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4976 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4977 			net->flight_size = 0;
4978 			net->partial_bytes_acked = 0;
4979 		}
4980 		asoc->total_flight = 0;
4981 		asoc->total_flight_count = 0;
4982 	}
4983 	/**********************************/
4984 	/* Now what about shutdown issues */
4985 	/**********************************/
4986 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4987 		/* nothing left on sendqueue.. consider done */
4988 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4989 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4990 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4991 		}
4992 		asoc->peers_rwnd = a_rwnd;
4993 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4994 			/* SWS sender side engages */
4995 			asoc->peers_rwnd = 0;
4996 		}
4997 		/* clean up */
4998 		if ((asoc->stream_queue_cnt == 1) &&
4999 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5000 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5001 		    (asoc->locked_on_sending)
5002 		    ) {
5003 			struct sctp_stream_queue_pending *sp;
5004 
5005 			/*
5006 			 * We may be in a state where we got everything across
5007 			 * but cannot write more due to a shutdown; we abort
5008 			 * since the user did not indicate EOR in this case.
5009 			 */
5010 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
5011 			    sctp_streamhead);
5012 			if ((sp) && (sp->length == 0)) {
5013 				asoc->locked_on_sending = NULL;
5014 				if (sp->msg_is_complete) {
5015 					asoc->stream_queue_cnt--;
5016 				} else {
5017 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
5018 					asoc->stream_queue_cnt--;
5019 				}
5020 			}
5021 		}
5022 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5023 		    (asoc->stream_queue_cnt == 0)) {
5024 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5025 				/* Need to abort here */
5026 				struct mbuf *oper;
5027 
5028 		abort_out_now:
5029 				*abort_now = 1;
5030 				/* XXX */
5031 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
5032 				    0, M_DONTWAIT, 1, MT_DATA);
5033 				if (oper) {
5034 					struct sctp_paramhdr *ph;
5035 					uint32_t *ippp;
5036 
5037 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5038 					    sizeof(uint32_t);
5039 					ph = mtod(oper, struct sctp_paramhdr *);
5040 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
5041 					ph->param_length = htons(SCTP_BUF_LEN(oper));
5042 					ippp = (uint32_t *) (ph + 1);
5043 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
5044 				}
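				/*
				 * The mbuf built above carries a standard
				 * error cause: a parameter header
				 * (type/length) followed by one 32-bit
				 * value identifying where in this file the
				 * abort originated, for debugging.
				 */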
5045 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
5046 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
5047 				return;
5048 			} else {
5049 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5050 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5051 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5052 				}
5053 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5054 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5055 				sctp_stop_timers_for_shutdown(stcb);
5056 				sctp_send_shutdown(stcb,
5057 				    stcb->asoc.primary_destination);
5058 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5059 				    stcb->sctp_ep, stcb, asoc->primary_destination);
5060 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5061 				    stcb->sctp_ep, stcb, asoc->primary_destination);
5062 			}
5063 			return;
5064 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5065 		    (asoc->stream_queue_cnt == 0)) {
5066 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5067 				goto abort_out_now;
5068 			}
5069 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5070 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5071 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5072 			sctp_send_shutdown_ack(stcb,
5073 			    stcb->asoc.primary_destination);
5074 
5075 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5076 			    stcb->sctp_ep, stcb, asoc->primary_destination);
5077 			return;
5078 		}
5079 	}
5080 	/*
5081 	 * Now here we are going to recycle net_ack for a different use...
5082 	 * HEADS UP.
5083 	 */
5084 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5085 		net->net_ack = 0;
5086 	}
5087 
5088 	/*
5089 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5090 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5091 	 * automatically ensure that.
5092 	 */
5093 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
5094 		this_sack_lowest_newack = cum_ack;
5095 	}
5096 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5097 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5098 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5099 	}
5100 	/* JRS - Use the congestion control given in the CC module */
5101 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5102 
5103 	/******************************************************************
5104 	 *  Here we do the stuff with ECN Nonce checking.
5105 	 *  We basically check to see if the nonce sum flag was incorrect
5106 	 *  or if resynchronization needs to be done. Also if we catch a
5107 	 *  misbehaving receiver we give him the kick.
5108 	 ******************************************************************/
5109 
5110 	if (asoc->ecn_nonce_allowed) {
5111 		if (asoc->nonce_sum_check) {
5112 			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
5113 				if (asoc->nonce_wait_for_ecne == 0) {
5114 					struct sctp_tmit_chunk *lchk;
5115 
5116 					lchk = TAILQ_FIRST(&asoc->send_queue);
5117 					asoc->nonce_wait_for_ecne = 1;
5118 					if (lchk) {
5119 						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
5120 					} else {
5121 						asoc->nonce_wait_tsn = asoc->sending_seq;
5122 					}
5123 				} else {
5124 					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
5125 					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
5126 						/*
5127 						 * Misbehaving peer. We need
5128 						 * to react to this guy
5129 						 */
5130 						asoc->ecn_allowed = 0;
5131 						asoc->ecn_nonce_allowed = 0;
5132 					}
5133 				}
5134 			}
5135 		} else {
5136 			/* See if Resynchronization Possible */
5137 			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
5138 				asoc->nonce_sum_check = 1;
5139 				/*
5140 				 * now we must calculate what the base is.
5141 				 * We know the totals for all segments
5142 				 * gap-acked in the SACK (ecn_seg_sums) and
5143 				 * the SACK's nonce sum (nonce_sum_flag),
5144 				 * so we can build a truth table to
5145 				 * back-calculate the new value of
5146 				 * asoc->nonce_sum_expect_base:
5147 				 *
5148 				 * SACK-flag-Value  Seg-Sums  Base
5149 				 *        0            0      0
5150 				 *        0            1      1
5151 				 *        1            0      1
5152 				 *        1            1      0
5153 				 */
5154 				asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
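				/*
				 * Worked example of the table above: if the
				 * SACK's nonce sum flag is 1 and the
				 * gap-acked segments sum to 0 (mod 2), the
				 * new base is 1; i.e. base = seg-sums XOR
				 * flag, which is exactly what the assignment
				 * above computes.
				 */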
5155 			}
5156 		}
5157 	}
5158 	/* Now are we exiting loss recovery ? */
5159 	if (will_exit_fast_recovery) {
5160 		/* Ok, we must exit fast recovery */
5161 		asoc->fast_retran_loss_recovery = 0;
5162 	}
5163 	if ((asoc->sat_t3_loss_recovery) &&
5164 	    ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
5165 	    MAX_TSN) ||
5166 	    (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
5167 		/* end satellite t3 loss recovery */
5168 		asoc->sat_t3_loss_recovery = 0;
5169 	}
5170 	/*
5171 	 * CMT Fast recovery
5172 	 */
5173 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5174 		if (net->will_exit_fast_recovery) {
5175 			/* Ok, we must exit fast recovery */
5176 			net->fast_retran_loss_recovery = 0;
5177 		}
5178 	}
5179 
5180 	/* Adjust and set the new rwnd value */
5181 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5182 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5183 		    asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5184 	}
5185 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5186 	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
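	/*
	 * In other words: the usable peer window is whatever the SACK
	 * advertised, minus the bytes still in flight, further reduced by a
	 * per-chunk overhead allowance (sctp_peer_chunk_oh) for each chunk
	 * still on the sent queue; sctp_sbspace_sub() clamps at zero rather
	 * than wrapping.
	 */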
5187 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5188 		/* SWS sender side engages */
5189 		asoc->peers_rwnd = 0;
5190 	}
5191 	if (asoc->peers_rwnd > old_rwnd) {
5192 		win_probe_recovery = 1;
5193 	}
5194 	/*
5195 	 * Now we must setup so we have a timer up for anyone with
5196 	 * outstanding data.
5197 	 */
5198 	done_once = 0;
5199 again:
5200 	j = 0;
5201 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5202 		if (win_probe_recovery && (net->window_probe)) {
5203 			win_probe_recovered = 1;
5204 			/*-
5205 			 * Find first chunk that was used with
5206 			 * window probe and clear the event. Put
5207 			 * it back into the send queue as if it had
5208 			 * not been sent.
5209 			 */
5210 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5211 				if (tp1->window_probe) {
5212 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
5213 					break;
5214 				}
5215 			}
5216 		}
5217 		if (net->flight_size) {
5218 			j++;
5219 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5220 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5221 				    stcb->sctp_ep, stcb, net);
5222 			}
5223 			if (net->window_probe) {
5224 				net->window_probe = 0;
5225 			}
5226 		} else {
5227 			if (net->window_probe) {
5228 				/*
5229 				 * In window probes we must assure a timer
5230 				 * is still running there
5231 				 */
5232 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5233 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5234 					    stcb->sctp_ep, stcb, net);
5235 
5236 				}
5237 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5238 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5239 				    stcb, net,
5240 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5241 			}
5242 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5243 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5244 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5245 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5246 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
5247 				}
5248 			}
5249 		}
5250 	}
5251 	if ((j == 0) &&
5252 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5253 	    (asoc->sent_queue_retran_cnt == 0) &&
5254 	    (win_probe_recovered == 0) &&
5255 	    (done_once == 0)) {
5256 		/*
5257 		 * huh, this should not happen unless all packets are
5258 		 * PR-SCTP and marked to skip of course.
5259 		 */
5260 		if (sctp_fs_audit(asoc)) {
5261 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5262 				net->flight_size = 0;
5263 			}
5264 			asoc->total_flight = 0;
5265 			asoc->total_flight_count = 0;
5266 			asoc->sent_queue_retran_cnt = 0;
5267 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5268 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5269 					sctp_flight_size_increase(tp1);
5270 					sctp_total_flight_increase(stcb, tp1);
5271 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5272 					asoc->sent_queue_retran_cnt++;
5273 				}
5274 			}
5275 		}
5276 		done_once = 1;
5277 		goto again;
5278 	}
5279 	/*********************************************/
5280 	/* Here we perform PR-SCTP procedures        */
5281 	/* (section 4.2)                             */
5282 	/*********************************************/
5283 	/* C1. update advancedPeerAckPoint */
5284 	if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
5285 		asoc->advanced_peer_ack_point = cum_ack;
5286 	}
5287 	/* C2. try to further move advancedPeerAckPoint ahead */
5288 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5289 		struct sctp_tmit_chunk *lchk;
5290 		uint32_t old_adv_peer_ack_point;
5291 
5292 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5293 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5294 		/* C3. See if we need to send a Fwd-TSN */
5295 		if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
5296 		    MAX_TSN)) {
5297 			/*
5298 			 * ISSUE with ECN, see FWD-TSN processing for notes
5299 			 * on issues that will occur when the ECN NONCE
5300 			 * stuff is put into SCTP for cross checking.
5301 			 */
5302 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5303 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5304 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5305 				    old_adv_peer_ack_point);
5306 			}
5307 			if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
5308 			    MAX_TSN)) {
5309 				send_forward_tsn(stcb, asoc);
5310 				/*
5311 				 * ECN Nonce: Disable Nonce Sum check when
5312 				 * FWD TSN is sent and store resync tsn
5313 				 */
5314 				asoc->nonce_sum_check = 0;
5315 				asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
5316 			} else if (lchk) {
5317 				/* try to FR fwd-tsn's that get lost too */
5318 				lchk->rec.data.fwd_tsn_cnt++;
5319 				if (lchk->rec.data.fwd_tsn_cnt > 3) {
5320 					send_forward_tsn(stcb, asoc);
5321 					lchk->rec.data.fwd_tsn_cnt = 0;
5322 				}
5323 			}
5324 		}
5325 		if (lchk) {
5326 			/* Assure a timer is up */
5327 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5328 			    stcb->sctp_ep, stcb, lchk->whoTo);
5329 		}
5330 	}
5331 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5332 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5333 		    a_rwnd,
5334 		    stcb->asoc.peers_rwnd,
5335 		    stcb->asoc.total_flight,
5336 		    stcb->asoc.total_output_queue_size);
5337 	}
5338 }
5339 
5340 void
5341 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5342     struct sctp_nets *netp, int *abort_flag)
5343 {
5344 	/* Copy cum-ack */
5345 	uint32_t cum_ack, a_rwnd;
5346 
5347 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5348 	/* Arrange so a_rwnd does NOT change */
5349 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
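	/*
	 * Since peers_rwnd was last derived from the advertised window
	 * minus the bytes in flight, adding total_flight back
	 * (approximately) reconstructs the peer's last advertised rwnd, so
	 * the express handler below sees no window change.
	 */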
5350 
5351 	/* Now call the express sack handling */
5352 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
5353 }
5354 
5355 static void
5356 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5357     struct sctp_stream_in *strmin)
5358 {
5359 	struct sctp_queued_to_read *ctl, *nctl;
5360 	struct sctp_association *asoc;
5361 	int tt;
5362 
5363 	asoc = &stcb->asoc;
5364 	tt = strmin->last_sequence_delivered;
5365 	/*
5366 	 * First deliver anything prior to and including the stream
5367 	 * sequence number that came in.
5368 	 */
5369 	ctl = TAILQ_FIRST(&strmin->inqueue);
5370 	while (ctl) {
5371 		nctl = TAILQ_NEXT(ctl, next);
5372 		if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
5373 		    (tt == ctl->sinfo_ssn)) {
5374 			/* this is deliverable now */
5375 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5376 			/* subtract pending on streams */
5377 			asoc->size_on_all_streams -= ctl->length;
5378 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5379 			/* deliver it to at least the delivery-q */
5380 			if (stcb->sctp_socket) {
5381 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5382 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5383 				    ctl,
5384 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5385 			}
5386 		} else {
5387 			/* no more delivery now. */
5388 			break;
5389 		}
5390 		ctl = nctl;
5391 	}
5392 	/*
5393 	 * now we must deliver things in the queue the normal way,
5394 	 * if any are now ready.
5395 	 */
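	/*
	 * E.g. if sequence 5 was just delivered above and 6 and 7 already
	 * sit in the queue, the loop below delivers 6, then 7, advancing
	 * last_sequence_delivered each time, and stops at the first gap.
	 */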
5396 	tt = strmin->last_sequence_delivered + 1;
5397 	ctl = TAILQ_FIRST(&strmin->inqueue);
5398 	while (ctl) {
5399 		nctl = TAILQ_NEXT(ctl, next);
5400 		if (tt == ctl->sinfo_ssn) {
5401 			/* this is deliverable now */
5402 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5403 			/* subtract pending on streams */
5404 			asoc->size_on_all_streams -= ctl->length;
5405 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5406 			/* deliver it to at least the delivery-q */
5407 			strmin->last_sequence_delivered = ctl->sinfo_ssn;
5408 			if (stcb->sctp_socket) {
5409 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5410 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5411 				    ctl,
5412 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5413 
5414 			}
5415 			tt = strmin->last_sequence_delivered + 1;
5416 		} else {
5417 			break;
5418 		}
5419 		ctl = nctl;
5420 	}
5421 }
5422 
5423 static void
5424 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5425     struct sctp_association *asoc,
5426     uint16_t stream, uint16_t seq)
5427 {
5428 	struct sctp_tmit_chunk *chk, *at;
5429 
5430 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5431 		/* For each one on here see if we need to toss it */
5432 		/*
5433 		 * For now, large messages held on the reasmqueue that are
5434 		 * complete will be tossed too. We could in theory do more
5435 		 * work to spin through and stop after dumping one message
5436 		 * (i.e. on seeing the start of a new message at the head)
5437 		 * and call the delivery function to see if it can be
5438 		 * delivered. But for now we just dump everything on the queue.
5439 		 */
5440 		chk = TAILQ_FIRST(&asoc->reasmqueue);
5441 		while (chk) {
5442 			at = TAILQ_NEXT(chk, sctp_next);
5443 			/*
5444 			 * Do not toss it if on a different stream or marked
5445 			 * for unordered delivery, in which case the stream
5446 			 * sequence number has no meaning.
5447 			 */
5448 			if ((chk->rec.data.stream_number != stream) ||
5449 			    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5450 				chk = at;
5451 				continue;
5452 			}
5453 			if (chk->rec.data.stream_seq == seq) {
5454 				/* It needs to be tossed */
5455 				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5456 				if (compare_with_wrap(chk->rec.data.TSN_seq,
5457 				    asoc->tsn_last_delivered, MAX_TSN)) {
5458 					asoc->tsn_last_delivered =
5459 					    chk->rec.data.TSN_seq;
5460 					asoc->str_of_pdapi =
5461 					    chk->rec.data.stream_number;
5462 					asoc->ssn_of_pdapi =
5463 					    chk->rec.data.stream_seq;
5464 					asoc->fragment_flags =
5465 					    chk->rec.data.rcv_flags;
5466 				}
5467 				asoc->size_on_reasm_queue -= chk->send_size;
5468 				sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5469 
5470 				/* Clear up any stream problem */
5471 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5472 				    SCTP_DATA_UNORDERED &&
5473 				    (compare_with_wrap(chk->rec.data.stream_seq,
5474 				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
5475 				    MAX_SEQ))) {
5476 					/*
5477 					 * We must advance this stream's
5478 					 * sequence number if the chunk
5479 					 * being skipped is not unordered.
5480 					 * There is a chance that if the
5481 					 * peer does not include the last
5482 					 * fragment in its FWD-TSN, we
5483 					 * WILL have a problem here since
5484 					 * there would be a partial chunk
5485 					 * in the queue that may not be
5486 					 * deliverable. Also, if a partial
5487 					 * delivery API has started, the
5488 					 * user may get a partial chunk;
5489 					 * the next read returns a new
5490 					 * chunk... really ugly, but I see
5491 					 * no way around it! Maybe a notify??
5492 					 */
5493 					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5494 					    chk->rec.data.stream_seq;
5495 				}
5496 				if (chk->data) {
5497 					sctp_m_freem(chk->data);
5498 					chk->data = NULL;
5499 				}
5500 				sctp_free_a_chunk(stcb, chk);
5501 			} else if (compare_with_wrap(chk->rec.data.stream_seq, seq, MAX_SEQ)) {
5502 				/*
5503 				 * If the stream_seq is greater than the one
5504 				 * being purged, we are done.
5505 				 */
5506 				break;
5507 			}
5508 			chk = at;
5509 		}
5510 	}
5511 }
5512 
5513 
5514 void
5515 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5516     struct sctp_forward_tsn_chunk *fwd,
5517     int *abort_flag, struct mbuf *m, int offset)
5518 {
5519 	/*
5520 	 * ISSUES that MUST be fixed for ECN! When we are the sender of the
5521 	 * forward TSN, when the SACK comes back that acknowledges the
5522 	 * FWD-TSN we must reset the NONCE sum to match correctly. This will
5523 	 * get quite tricky since we may have sent more data intervening
5524 	 * and must carefully account for what the SACK says on the nonce
5525 	 * and any gaps that are reported. This work will NOT be done here,
5526 	 * but I note it here since it is really related to PR-SCTP and
5527 	 * FWD-TSN's
5528 	 */
5529 
5530 	/* The pr-sctp fwd tsn */
5531 	/*
5532 	 * here we will perform all the data receiver side steps for
5533 	 * processing FwdTSN, as required by the pr-sctp draft.
5534 	 * Assume we get FwdTSN(x):
5535 	 *
5536 	 * 1) update local cumTSN to x
5537 	 * 2) try to further advance cumTSN to x + others we have
5538 	 * 3) examine and update re-ordering queue on pr-in-streams
5539 	 * 4) clean up re-assembly queue
5540 	 * 5) Send a sack to report where we are.
5541 	 */
5542 	struct sctp_association *asoc;
5543 	uint32_t new_cum_tsn, tsn, gap;
5544 	unsigned int i, fwd_sz, cumack_set_flag, m_size, fnd = 0;
5545 	uint32_t str_seq;
5546 	struct sctp_stream_in *strm;
5547 	struct sctp_tmit_chunk *chk, *at;
5548 	struct sctp_queued_to_read *ctl, *sv;
5549 
5550 	cumack_set_flag = 0;
5551 	asoc = &stcb->asoc;
5552 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5553 		SCTPDBG(SCTP_DEBUG_INDATA1,
5554 		    "Bad size, fwd-tsn chunk too small\n");
5555 		return;
5556 	}
5557 	m_size = (stcb->asoc.mapping_array_size << 3);
5558 	/*************************************************************/
5559 	/* 1. Here we update local cumTSN and shift the bitmap array */
5560 	/*************************************************************/
5561 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5562 
5563 	if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
5564 	    asoc->cumulative_tsn == new_cum_tsn) {
5565 		/* Already got there ... */
5566 		return;
5567 	}
5568 	if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
5569 	    MAX_TSN)) {
5570 		asoc->highest_tsn_inside_map = new_cum_tsn;
5571 
5572 	}
5573 	if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_nr_map,
5574 	    MAX_TSN)) {
5575 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5576 	}
5577 	/*
5578 	 * now we know the new TSN is more advanced, let's find the actual
5579 	 * gap
5580 	 */
5581 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5582 	asoc->cumulative_tsn = new_cum_tsn;
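	/*
	 * gap is now the distance (mod 2^32) from the base of the mapping
	 * array to the new cumulative TSN. If it does not fit in the bitmap
	 * (m_size bits), the entire map is stale and is simply reset below;
	 * otherwise the covered TSNs are marked one by one.
	 */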
5583 	if (gap >= m_size) {
5584 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5585 			struct mbuf *oper;
5586 
5587 			/*
5588 			 * out of range (of single byte chunks in the rwnd I
5589 			 * give out). This must be an attacker.
5590 			 */
5591 			*abort_flag = 1;
5592 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5593 			    0, M_DONTWAIT, 1, MT_DATA);
5594 			if (oper) {
5595 				struct sctp_paramhdr *ph;
5596 				uint32_t *ippp;
5597 
5598 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5599 				    (sizeof(uint32_t) * 3);
5600 				ph = mtod(oper, struct sctp_paramhdr *);
5601 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5602 				ph->param_length = htons(SCTP_BUF_LEN(oper));
5603 				ippp = (uint32_t *) (ph + 1);
5604 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5605 				ippp++;
5606 				*ippp = asoc->highest_tsn_inside_map;
5607 				ippp++;
5608 				*ippp = new_cum_tsn;
5609 			}
5610 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5611 			sctp_abort_an_association(stcb->sctp_ep, stcb,
5612 			    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
5613 			return;
5614 		}
5615 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5616 
5617 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5618 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5619 		asoc->highest_tsn_inside_map = new_cum_tsn;
5620 
5621 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5622 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5623 
5624 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5625 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5626 		}
5627 		asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
5628 	} else {
5629 		SCTP_TCB_LOCK_ASSERT(stcb);
5630 		for (i = 0; i <= gap; i++) {
5631 			SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, i);
5632 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5633 			/* FIX ME add something to set up highest TSN in map */
5634 		}
5635 		if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
5636 			asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5637 		}
5638 		if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map, MAX_TSN) ||
5639 		    new_cum_tsn == asoc->highest_tsn_inside_map) {
5640 			/* We must back down to see what the new highest is */
5641 			for (tsn = new_cum_tsn; (compare_with_wrap(tsn, asoc->mapping_array_base_tsn, MAX_TSN) ||
5642 			    (tsn == asoc->mapping_array_base_tsn)); tsn--) {
5643 				SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
5644 				if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
5645 					asoc->highest_tsn_inside_map = tsn;
5646 					fnd = 1;
5647 					break;
5648 				}
5649 			}
5650 			if (!fnd) {
5651 				asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
5652 			}
5653 		}
5654 		/*
5655 		 * Now after marking all, slide things forward but no sack
5656 		 * please.
5657 		 */
5658 		sctp_slide_mapping_arrays(stcb);
5659 	}
5660 	/*************************************************************/
5661 	/* 2. Clear up re-assembly queue                             */
5662 	/*************************************************************/
5663 	/*
5664 	 * First service it if pd-api is up, just in case we can progress it
5665 	 * forward
5666 	 */
5667 	if (asoc->fragmented_delivery_inprogress) {
5668 		sctp_service_reassembly(stcb, asoc);
5669 	}
5670 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5671 		/* For each one on here see if we need to toss it */
5672 		/*
5673 		 * For now, large messages held on the reasmqueue that are
5674 		 * complete will be tossed too. We could in theory do more
5675 		 * work to spin through and stop after dumping one message
5676 		 * (i.e. on seeing the start of a new message at the head)
5677 		 * and call the delivery function to see if it can be
5678 		 * delivered. But for now we just dump everything on the queue.
5679 		 */
5680 		chk = TAILQ_FIRST(&asoc->reasmqueue);
5681 		while (chk) {
5682 			at = TAILQ_NEXT(chk, sctp_next);
5683 			if ((compare_with_wrap(new_cum_tsn,
5684 			    chk->rec.data.TSN_seq, MAX_TSN)) ||
5685 			    (new_cum_tsn == chk->rec.data.TSN_seq)) {
5686 				/* It needs to be tossed */
5687 				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5688 				if (compare_with_wrap(chk->rec.data.TSN_seq,
5689 				    asoc->tsn_last_delivered, MAX_TSN)) {
5690 					asoc->tsn_last_delivered =
5691 					    chk->rec.data.TSN_seq;
5692 					asoc->str_of_pdapi =
5693 					    chk->rec.data.stream_number;
5694 					asoc->ssn_of_pdapi =
5695 					    chk->rec.data.stream_seq;
5696 					asoc->fragment_flags =
5697 					    chk->rec.data.rcv_flags;
5698 				}
5699 				asoc->size_on_reasm_queue -= chk->send_size;
5700 				sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5701 
5702 				/* Clear up any stream problem */
5703 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5704 				    SCTP_DATA_UNORDERED &&
5705 				    (compare_with_wrap(chk->rec.data.stream_seq,
5706 				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
5707 				    MAX_SEQ))) {
5708 					/*
5709 					 * We must advance this stream's
5710 					 * sequence number if the chunk
5711 					 * being skipped is not unordered.
5712 					 * There is a chance that if the
5713 					 * peer does not include the last
5714 					 * fragment in its FWD-TSN, we
5715 					 * WILL have a problem here since
5716 					 * there would be a partial chunk
5717 					 * in the queue that may not be
5718 					 * deliverable. Also, if a partial
5719 					 * delivery API has started, the
5720 					 * user may get a partial chunk;
5721 					 * the next read returns a new
5722 					 * chunk... really ugly, but I see
5723 					 * no way around it! Maybe a notify??
5724 					 */
5725 					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5726 					    chk->rec.data.stream_seq;
5727 				}
5728 				if (chk->data) {
5729 					sctp_m_freem(chk->data);
5730 					chk->data = NULL;
5731 				}
5732 				sctp_free_a_chunk(stcb, chk);
5733 			} else {
5734 				/*
5735 				 * Ok we have gone beyond the end of the
5736 				 * fwd-tsn's mark.
5737 				 */
5738 				break;
5739 			}
5740 			chk = at;
5741 		}
5742 	}
5743 	/*******************************************************/
5744 	/* 3. Update the PR-stream re-ordering queues and fix  */
5745 	/* delivery issues as needed.                           */
5746 	/*******************************************************/
5747 	fwd_sz -= sizeof(*fwd);
5748 	if (m && fwd_sz) {
5749 		/* New method. */
5750 		unsigned int num_str;
5751 		struct sctp_strseq *stseq, strseqbuf;
5752 
5753 		offset += sizeof(*fwd);
5754 
5755 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5756 		num_str = fwd_sz / sizeof(struct sctp_strseq);
5757 		for (i = 0; i < num_str; i++) {
5758 			uint16_t st;
5759 
5760 			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5761 			    sizeof(struct sctp_strseq),
5762 			    (uint8_t *) & strseqbuf);
5763 			offset += sizeof(struct sctp_strseq);
5764 			if (stseq == NULL) {
5765 				break;
5766 			}
5767 			/* Convert */
5768 			st = ntohs(stseq->stream);
5769 			stseq->stream = st;
5770 			st = ntohs(stseq->sequence);
5771 			stseq->sequence = st;
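			/*
			 * stseq now holds host-byte-order values; each
			 * stream/sequence pair names one PR-stream entry
			 * carried in the FWD-TSN chunk.
			 */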
5772 
5773 			/* now process */
5774 
5775 			/*
5776 			 * Ok we now look for the stream/seq on the read
5777 			 * queue where it's not all delivered. If we find it
5778 			 * we transmute the read entry into a PDI_ABORTED.
5779 			 */
5780 			if (stseq->stream >= asoc->streamincnt) {
5781 				/* screwed up streams, stop!  */
5782 				break;
5783 			}
5784 			if ((asoc->str_of_pdapi == stseq->stream) &&
5785 			    (asoc->ssn_of_pdapi == stseq->sequence)) {
5786 				/*
5787 				 * If this is the one we were partially
5788 				 * delivering now then we no longer are.
5789 				 * Note this will change with the reassembly
5790 				 * re-write.
5791 				 */
5792 				asoc->fragmented_delivery_inprogress = 0;
5793 			}
5794 			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5795 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5796 				if ((ctl->sinfo_stream == stseq->stream) &&
5797 				    (ctl->sinfo_ssn == stseq->sequence)) {
5798 					str_seq = (stseq->stream << 16) | stseq->sequence;
5799 					ctl->end_added = 1;
5800 					ctl->pdapi_aborted = 1;
5801 					sv = stcb->asoc.control_pdapi;
5802 					stcb->asoc.control_pdapi = ctl;
5803 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5804 					    stcb,
5805 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5806 					    (void *)&str_seq,
5807 					    SCTP_SO_NOT_LOCKED);
5808 					stcb->asoc.control_pdapi = sv;
5809 					break;
5810 				} else if ((ctl->sinfo_stream == stseq->stream) &&
5811 				    (compare_with_wrap(ctl->sinfo_ssn, stseq->sequence, MAX_SEQ))) {
5812 					/* We are past our victim SSN */
5813 					break;
5814 				}
5815 			}
5816 			strm = &asoc->strmin[stseq->stream];
5817 			if (compare_with_wrap(stseq->sequence,
5818 			    strm->last_sequence_delivered, MAX_SEQ)) {
5819 				/* Update the sequence number */
5820 				strm->last_sequence_delivered =
5821 				    stseq->sequence;
5822 			}
5823 			/* now kick the stream the new way */
5824 			/* sa_ignore NO_NULL_CHK */
5825 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5826 		}
5827 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5828 	}
5829 	if (TAILQ_FIRST(&asoc->reasmqueue)) {
5830 		/* now lets kick out and check for more fragmented delivery */
5831 		/* sa_ignore NO_NULL_CHK */
5832 		sctp_deliver_reasm_check(stcb, &stcb->asoc);
5833 	}
5834 }
5835