/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $	 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>

/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it) for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a one-to-many socket,
	 * since sb_cc is the count that everyone has put up. When we
	 * rewrite sctp_soreceive we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * Take out what has NOT been put on the socket queue and that we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));

	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0 (SWS avoidance engaged).
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
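
/*
 * Worked example (illustrative numbers, not from this file): with a
 * 64000-byte receive buffer, 1000 bytes held in 2 chunks on the
 * reassembly queue and nothing on the stream queues, we start from the
 * free socket-buffer space, subtract 1000 + 2 * MSIZE for the data we
 * still hold, and then subtract my_rwnd_control_len for control
 * overhead; if that leaves less than my_rwnd_control_len we advertise a
 * window of 1 to avoid silly window syndrome.
 */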


/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}
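
/*
 * Note that the chunk-level receive flags are parked in the upper byte
 * of sinfo_flags (the << 8 above); the lower byte is left for the
 * user-visible send/receive flags, so consumers recover the raw chunk
 * flags by shifting back down by 8.
 */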


/*
 * Build out our readq entry based on the incoming chunk.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}

	ret = sctp_get_mbuf_for_msg(len,
	    0, M_DONTWAIT, 1, MT_DATA);

	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		/* copy only the payload; len also counts the cmsg header */
		memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	SCTP_BUF_LEN(ret) = cmh->cmsg_len;
	return (ret);
}
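
/*
 * For orientation only: a userland receiver sees this mbuf as ancillary
 * data on recvmsg(). A minimal sketch (hypothetical fd/buf names; assumes
 * the usual <sys/socket.h> and <netinet/sctp.h> userland headers and that
 * sctp_data_io_event was subscribed via the SCTP_EVENTS socket option):
 *
 *	char buf[2048], cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
 *	struct iovec iov = { buf, sizeof(buf) };
 *	struct msghdr msg;
 *	struct cmsghdr *c;
 *
 *	memset(&msg, 0, sizeof(msg));
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	if (recvmsg(fd, &msg, 0) > 0)
 *		for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
 *			if (c->cmsg_level == IPPROTO_SCTP &&
 *			    c->cmsg_type == SCTP_SNDRCV) {
 *				struct sctp_sndrcvinfo info;
 *
 *				memcpy(&info, CMSG_DATA(c), sizeof(info));
 *				-- info.sinfo_stream, info.sinfo_ssn,
 *				-- info.sinfo_tsn etc. are now valid
 *			}
 */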


char *
sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
    int *control_len,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	char *buf;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}
	SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
	if (buf == NULL) {
		/* No space */
		return (buf);
	}
	/* We need a CMSG header followed by the struct */
	cmh = (struct cmsghdr *)buf;
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		/* copy only the payload; len also counts the cmsg header */
		memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	*control_len = len;
	return (buf);
}

static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (compare_with_wrap(cumackp1, tsn, MAX_TSN)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one mapping array to the
		 * other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
		panic("Things are really messed up now!!");
#endif
	}
	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; (compare_with_wrap(i, asoc->mapping_array_base_tsn, MAX_TSN) ||
		    (i == asoc->mapping_array_base_tsn)); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
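
/*
 * Orientation: mapping_array tracks TSNs we may still renege on when
 * draining, while nr_mapping_array tracks TSNs we have committed to
 * (non-renegable); the routine above moves one bit from the former to
 * the latter. The gap is plain serial arithmetic: e.g. with
 * mapping_array_base_tsn 0xfffffffe and tsn 0x00000001,
 * SCTP_CALC_TSN_TO_GAP() yields 3, so the wrap at 2^32 is harmless.
 */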


/*
 * We are delivering currently from the reassembly queue. We must continue to
 * deliver until we either: 1) run out of space, 2) run out of sequential
 * TSN's, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;

	struct sctp_queued_to_read *control, *ctl, *ctlat;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket above is long gone or going.. */
abandon:
		asoc->fragmented_delivery_inprogress = 0;
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		while (chk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk);
			/* sa_ignore FREED_MEMORY */
			chk = TAILQ_FIRST(&asoc->reasmqueue);
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	do {
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		if (chk == NULL) {
			return;
		}
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream
			 * and it is ordered; we must wait.
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {

			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end,
			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong, either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is an EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was an EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");

#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* pull it, we did it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now let's see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			ctl = TAILQ_FIRST(&strm->inqueue);
			if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
				while (ctl != NULL) {
					/* Deliver more if we can. */
					if (nxt_todel == ctl->sinfo_ssn) {
						ctlat = TAILQ_NEXT(ctl, next);
						TAILQ_REMOVE(&strm->inqueue, ctl, next);
						asoc->size_on_all_streams -= ctl->length;
						sctp_ucount_decr(asoc->cnt_on_all_streams);
						strm->last_sequence_delivered++;
						sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    ctl,
						    &stcb->sctp_socket->so_rcv, 1,
						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
						ctl = ctlat;
					} else {
						break;
					}
					nxt_todel = strm->last_sequence_delivered + 1;
				}
			}
			break;
		}
		/* sa_ignore FREED_MEMORY */
		chk = TAILQ_FIRST(&asoc->reasmqueue);
	} while (chk);
}
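
/*
 * For example, with tsn_last_delivered = 100 and fragments 101, 102 and
 * 104 queued, the delivery loop above hands up 101 and 102 and then
 * stops at the hole: 104 is not tsn_last_delivered + 1, so it waits
 * for 103.
 */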

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains, what to do when the socket buffer is FULL??
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 *
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *oper;

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream,
	    (uint32_t) strm->last_sequence_delivered,
	    (uint32_t) nxt_todel);
	if (compare_with_wrap(strm->last_sequence_delivered,
	    control->sinfo_ssn, MAX_SEQ) ||
	    (strm->last_sequence_delivered == control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (sizeof(uint32_t) * 3);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
			ippp++;
			*ippp = control->sinfo_tsn;
			ippp++;
			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

		*abort_flag = 1;
		return;

	}
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it won't be queued if it could be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;

		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
		control = TAILQ_FIRST(&strm->inqueue);
		while (control != NULL) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				at = TAILQ_NEXT(control, next);
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_NOT_LOCKED);
				control = at;
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if ((compare_with_wrap(asoc->cumulative_tsn,
		    control->sinfo_tsn, MAX_TSN)) ||
		    (control->sinfo_tsn == asoc->cumulative_tsn)) {
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (compare_with_wrap(at->sinfo_ssn,
				    control->sinfo_ssn, MAX_SEQ)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, he sent me a duplicate str
					 * seq number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSN's have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh for now just blow
					 * away the chunk!
					 */

					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom) {
						sctp_free_remote_addr(control->whoFrom);
						control->whoFrom = NULL;
					}
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}
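
/*
 * Note on the ordering tests above: compare_with_wrap(a, b, MAX_SEQ)
 * compares 16-bit SSNs serially, so e.g. SSN 2 counts as newer than SSN
 * 65534 once the stream sequence space has wrapped; the same helper with
 * MAX_TSN does serial comparison over the 32-bit TSN space.
 */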

/*
 * Returns two things: you get the total size of the deliverable parts of the
 * first fragmented message on the reassembly queue, and you get a 1 back if
 * all of the message is ready or a 0 back if the message is still incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	while (chk) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
		chk = TAILQ_NEXT(chk, sctp_next);
	}
	return (0);
}
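
/*
 * Example: with fragments TSN 10 (FIRST), 11 (MIDDLE) and 12 (LAST) at
 * the head of the queue, *t_size becomes the sum of the three send_size
 * values and we return 1. Without the LAST fragment, or with a hole in
 * the TSN run, we return 0 and *t_size covers only the leading run.
 */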

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize, pd_point;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep the first one is here and it's ok to deliver,
			 * but should we?
			 */
			if (stcb->sctp_socket) {
				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
				    stcb->sctp_ep->partial_delivery_point);
			} else {
				pd_point = stcb->sctp_ep->partial_delivery_point;
			}
			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {

				/*
				 * Yes, we set up to start reception by
				 * backing down the TSN just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery.. but it won't know to go
		 * back and call itself again... we do that here with the
		 * goto doit_again.
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our fragmented delivery, could be more
			 * waiting?
			 */
			goto doit_again;
		}
	}
}
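
/*
 * Partial-delivery example (illustrative numbers): with
 * partial_delivery_point = 4096 and a larger socket-buffer limit,
 * pd_point is 4096; a fragmented message whose queued leading run
 * already totals, say, 5000 bytes starts being handed to the reader
 * before its LAST fragment has arrived.
 */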

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so pull it off (or as much as
 * we can). If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
	u_char last_flags;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery inprogress,
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's not first, no fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);

				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (sizeof(uint32_t) * 3);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment NOT a FIRST
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (3 * sizeof(uint32_t));
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
					    chk->rec.data.stream_number,
					    asoc->str_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (sizeof(uint32_t) * 3);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
					    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
					    chk->rec.data.stream_seq,
					    asoc->ssn_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(at->rec.data.TSN_seq,
		    chk->rec.data.TSN_seq, MAX_TSN)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, he sent me a duplicate str seq number */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSN's have wrapped. Maybe I should
			 * compare to TSN somehow... sigh for now just blow
			 * away the chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk);
			return;
		} else {
			last_flags = at->rec.data.rcv_flags;
			last_tsn = at->rec.data.TSN_seq;
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    prev->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    prev->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, it's not FIRST and it must be!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's not a last!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_MIDDLE_FRAG ||
				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    next->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    next->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is not
 * stuffing us full of bad packet fragments. A broken peer could also do this,
 * but that is doubtful. It is too bad I must worry about evil crackers, sigh
 * :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(TSN_seq,
		    at->rec.data.TSN_seq, MAX_TSN)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok this guy belongs next to a guy
					 * that is NOT last, it should be a
					 * middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since it's a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than the new chunk, but does
			 * it need to be right before it?
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, it better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}
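
/*
 * Example: if the queue holds TSN 7 flagged MIDDLE and a TSN that is
 * about to be treated as a self-contained message arrives with value 8,
 * it would sit right after a non-LAST fragment, so we return 1 and the
 * caller treats it as bogus; had TSN 7 been a LAST, the same arrival is
 * legitimate and we return 0.
 */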


static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
	/* Process a data chunk */
	struct sctp_tmit_chunk *chk;
	uint32_t tsn, gap;
	struct mbuf *dmbuf;
	int indx, the_len;
	int need_reasm_check = 0;
	uint16_t strmno, strmseq;
	struct mbuf *oper;
	struct sctp_queued_to_read *control;
	int ordered;
	uint32_t protocol_id;
	uint8_t chunk_flags;
	struct sctp_stream_reset_list *liste;

	chk = NULL;
	tsn = ntohl(ch->dp.tsn);
	chunk_flags = ch->ch.chunk_flags;
	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
		asoc->send_sack = 1;
	}
	protocol_id = ch->dp.protocol_id;
	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
	}
	if (stcb == NULL) {
		return (0);
	}
	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
	if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
	    asoc->cumulative_tsn == tsn) {
		/* It is a duplicate */
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/* Calculate the number of TSN's between the base and this TSN */
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
		/* Can't hold the bit in the mapping array; toss it */
		return (0);
	}
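	/*
	 * In numbers: the mapping array covers SCTP_MAPPING_ARRAY bytes,
	 * i.e. SCTP_MAPPING_ARRAY * 8 TSNs beyond mapping_array_base_tsn
	 * (4096 with the 512-byte array noted in the comment further
	 * below), so a TSN past that window simply cannot be recorded and
	 * is dropped above.
	 */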
1506 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1507 		SCTP_TCB_LOCK_ASSERT(stcb);
1508 		if (sctp_expand_mapping_array(asoc, gap)) {
1509 			/* Can't expand, drop it */
1510 			return (0);
1511 		}
1512 	}
1513 	if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
1514 		*high_tsn = tsn;
1515 	}
1516 	/* See if we have received this one already */
1517 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1518 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1519 		SCTP_STAT_INCR(sctps_recvdupdata);
1520 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1521 			/* Record a dup for the next outbound sack */
1522 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1523 			asoc->numduptsns++;
1524 		}
1525 		asoc->send_sack = 1;
1526 		return (0);
1527 	}
1528 	/*
1529 	 * Check to see about the GONE flag, duplicates would cause a sack
1530 	 * to be sent up above
1531 	 */
1532 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1533 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1534 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1535 	    ) {
1536 		/*
1537 		 * wait a minute, this guy is gone, there is no longer a
1538 		 * receiver. Send peer an ABORT!
1539 		 */
1540 		struct mbuf *op_err;
1541 
1542 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1543 		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
1544 		*abort_flag = 1;
1545 		return (0);
1546 	}
1547 	/*
1548 	 * Now before going further we see if there is room. If NOT then we
1549 	 * MAY let one through only IF this TSN is the one we are waiting
1550 	 * for on a partial delivery API.
1551 	 */
1552 
1553 	/* now do the tests */
1554 	if (((asoc->cnt_on_all_streams +
1555 	    asoc->cnt_on_reasm_queue +
1556 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1557 	    (((int)asoc->my_rwnd) <= 0)) {
1558 		/*
1559 		 * When we have NO room in the rwnd we check to make sure
1560 		 * the reader is doing its job...
1561 		 */
1562 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1563 			/* some to read, wake-up */
1564 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1565 			struct socket *so;
1566 
1567 			so = SCTP_INP_SO(stcb->sctp_ep);
1568 			atomic_add_int(&stcb->asoc.refcnt, 1);
1569 			SCTP_TCB_UNLOCK(stcb);
1570 			SCTP_SOCKET_LOCK(so, 1);
1571 			SCTP_TCB_LOCK(stcb);
1572 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1573 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1574 				/* assoc was freed while we were unlocked */
1575 				SCTP_SOCKET_UNLOCK(so, 1);
1576 				return (0);
1577 			}
1578 #endif
1579 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1580 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1581 			SCTP_SOCKET_UNLOCK(so, 1);
1582 #endif
1583 		}
1584 		/* now is it in the mapping array of what we have accepted? */
1585 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN) &&
1586 		    compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
			/* Nope, not in the valid range, dump it */
1588 			sctp_set_rwnd(stcb, asoc);
1589 			if ((asoc->cnt_on_all_streams +
1590 			    asoc->cnt_on_reasm_queue +
1591 			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1592 				SCTP_STAT_INCR(sctps_datadropchklmt);
1593 			} else {
1594 				SCTP_STAT_INCR(sctps_datadroprwnd);
1595 			}
1596 			indx = *break_flag;
1597 			*break_flag = 1;
1598 			return (0);
1599 		}
1600 	}
1601 	strmno = ntohs(ch->dp.stream_id);
1602 	if (strmno >= asoc->streamincnt) {
1603 		struct sctp_paramhdr *phdr;
1604 		struct mbuf *mb;
1605 
1606 		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1607 		    0, M_DONTWAIT, 1, MT_DATA);
1608 		if (mb != NULL) {
1609 			/* add some space up front so prepend will work well */
1610 			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1611 			phdr = mtod(mb, struct sctp_paramhdr *);
			/*
			 * Error causes are just params and this one has
			 * two back-to-back phdrs: one with the error type
			 * and size, the other with the stream id and a
			 * rsvd field.
			 */
1617 			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1618 			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1619 			phdr->param_length =
1620 			    htons(sizeof(struct sctp_paramhdr) * 2);
1621 			phdr++;
1622 			/* We insert the stream in the type field */
1623 			phdr->param_type = ch->dp.stream_id;
1624 			/* And set the length to 0 for the rsvd field */
1625 			phdr->param_length = 0;
1626 			sctp_queue_op_err(stcb, mb);
1627 		}
1628 		SCTP_STAT_INCR(sctps_badsid);
1629 		SCTP_TCB_LOCK_ASSERT(stcb);
1630 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1631 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1632 			asoc->highest_tsn_inside_nr_map = tsn;
1633 		}
1634 		if (tsn == (asoc->cumulative_tsn + 1)) {
1635 			/* Update cum-ack */
1636 			asoc->cumulative_tsn = tsn;
1637 		}
1638 		return (0);
1639 	}
	/*
	 * Before we continue let's validate that we are not being fooled
	 * by an evil attacker. We can only have 4k chunks outstanding,
	 * based on the TSN spread the mapping array allows (512 * 8
	 * bits), so there is no way our stream sequence numbers could
	 * have wrapped. We of course only validate the FIRST fragment,
	 * so the bit must be set.
	 */
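	/*
	 * Checking that bound (assuming SCTP_MAPPING_ARRAY is 512, per
	 * the comment above): 512 bytes * 8 bits = 4096 TSNs of spread,
	 * so at most 4096 DATA chunks can sit in the map. Even if every
	 * one of them were a FIRST fragment on a single stream, the
	 * 16-bit stream sequence space (65536 values) cannot wrap within
	 * that window.
	 */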
1647 	strmseq = ntohs(ch->dp.stream_sequence);
1648 #ifdef SCTP_ASOCLOG_OF_TSNS
1649 	SCTP_TCB_LOCK_ASSERT(stcb);
1650 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1651 		asoc->tsn_in_at = 0;
1652 		asoc->tsn_in_wrapped = 1;
1653 	}
1654 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1655 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1656 	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1657 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1658 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1659 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1660 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1661 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1662 	asoc->tsn_in_at++;
1663 #endif
1664 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1665 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1666 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1667 	    (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
1668 	    strmseq, MAX_SEQ) ||
1669 	    asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
1670 		/* The incoming sseq is behind where we last delivered? */
1671 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1672 		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1673 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1674 		    0, M_DONTWAIT, 1, MT_DATA);
1675 		if (oper) {
1676 			struct sctp_paramhdr *ph;
1677 			uint32_t *ippp;
1678 
1679 			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1680 			    (3 * sizeof(uint32_t));
1681 			ph = mtod(oper, struct sctp_paramhdr *);
1682 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1683 			ph->param_length = htons(SCTP_BUF_LEN(oper));
1684 			ippp = (uint32_t *) (ph + 1);
1685 			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1686 			ippp++;
1687 			*ippp = tsn;
1688 			ippp++;
1689 			*ippp = ((strmno << 16) | strmseq);
1690 
1691 		}
1692 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1693 		sctp_abort_an_association(stcb->sctp_ep, stcb,
1694 		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1695 		*abort_flag = 1;
1696 		return (0);
1697 	}
1698 	/************************************
1699 	 * From here down we may find ch-> invalid
	 * so it's a good idea NOT to use it.
1701 	 *************************************/
1702 
1703 	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1704 	if (last_chunk == 0) {
1705 		dmbuf = SCTP_M_COPYM(*m,
1706 		    (offset + sizeof(struct sctp_data_chunk)),
1707 		    the_len, M_DONTWAIT);
1708 #ifdef SCTP_MBUF_LOGGING
1709 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1710 			struct mbuf *mat;
1711 
1712 			mat = dmbuf;
1713 			while (mat) {
1714 				if (SCTP_BUF_IS_EXTENDED(mat)) {
1715 					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1716 				}
1717 				mat = SCTP_BUF_NEXT(mat);
1718 			}
1719 		}
1720 #endif
1721 	} else {
1722 		/* We can steal the last chunk */
1723 		int l_len;
1724 
1725 		dmbuf = *m;
1726 		/* lop off the top part */
1727 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1728 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1729 			l_len = SCTP_BUF_LEN(dmbuf);
1730 		} else {
1731 			/*
			 * need to count up the size; hopefully we do not
			 * hit this too often :-0
1734 			 */
1735 			struct mbuf *lat;
1736 
1737 			l_len = 0;
1738 			lat = dmbuf;
1739 			while (lat) {
1740 				l_len += SCTP_BUF_LEN(lat);
1741 				lat = SCTP_BUF_NEXT(lat);
1742 			}
1743 		}
1744 		if (l_len > the_len) {
			/* Trim the excess bytes off the end too */
1746 			m_adj(dmbuf, -(l_len - the_len));
1747 		}
1748 	}
1749 	if (dmbuf == NULL) {
1750 		SCTP_STAT_INCR(sctps_nomem);
1751 		return (0);
1752 	}
1753 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1754 	    asoc->fragmented_delivery_inprogress == 0 &&
1755 	    TAILQ_EMPTY(&asoc->resetHead) &&
1756 	    ((ordered == 0) ||
1757 	    ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1758 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1759 		/* Candidate for express delivery */
1760 		/*
		 * It's not fragmented, no PD-API is up, nothing is in the
		 * delivery queue, it's un-ordered OR ordered and the next
		 * to deliver AND nothing else is stuck on the stream
		 * queue, and there is room for it in the socket buffer.
		 * Let's just stuff it up the buffer....
1766 		 */
1767 
1768 		/* It would be nice to avoid this copy if we could :< */
1769 		sctp_alloc_a_readq(stcb, control);
1770 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1771 		    protocol_id,
1772 		    stcb->asoc.context,
1773 		    strmno, strmseq,
1774 		    chunk_flags,
1775 		    dmbuf);
1776 		if (control == NULL) {
1777 			goto failed_express_del;
1778 		}
1779 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1780 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1781 			asoc->highest_tsn_inside_nr_map = tsn;
1782 		}
1783 		sctp_add_to_readq(stcb->sctp_ep, stcb,
1784 		    control, &stcb->sctp_socket->so_rcv,
1785 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1786 
1787 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1788 			/* for ordered, bump what we delivered */
1789 			asoc->strmin[strmno].last_sequence_delivered++;
1790 		}
1791 		SCTP_STAT_INCR(sctps_recvexpress);
1792 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1793 			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1794 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1795 		}
1796 		control = NULL;
1797 
1798 		goto finish_express_del;
1799 	}
1800 failed_express_del:
1801 	/* If we reach here this is a new chunk */
1802 	chk = NULL;
1803 	control = NULL;
1804 	/* Express for fragmented delivery? */
1805 	if ((asoc->fragmented_delivery_inprogress) &&
1806 	    (stcb->asoc.control_pdapi) &&
1807 	    (asoc->str_of_pdapi == strmno) &&
1808 	    (asoc->ssn_of_pdapi == strmseq)
1809 	    ) {
1810 		control = stcb->asoc.control_pdapi;
1811 		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1812 			/* Can't be another first? */
1813 			goto failed_pdapi_express_del;
1814 		}
1815 		if (tsn == (control->sinfo_tsn + 1)) {
1816 			/* Yep, we can add it on */
1817 			int end = 0;
1818 			uint32_t cumack;
1819 
1820 			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1821 				end = 1;
1822 			}
1823 			cumack = asoc->cumulative_tsn;
1824 			if ((cumack + 1) == tsn)
1825 				cumack = tsn;
1826 
1827 			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1828 			    tsn,
1829 			    &stcb->sctp_socket->so_rcv)) {
1830 				SCTP_PRINTF("Append fails end:%d\n", end);
1831 				goto failed_pdapi_express_del;
1832 			}
1833 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1834 			if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1835 				asoc->highest_tsn_inside_nr_map = tsn;
1836 			}
1837 			SCTP_STAT_INCR(sctps_recvexpressm);
1838 			control->sinfo_tsn = tsn;
1839 			asoc->tsn_last_delivered = tsn;
1840 			asoc->fragment_flags = chunk_flags;
1841 			asoc->tsn_of_pdapi_last_delivered = tsn;
1842 			asoc->last_flags_delivered = chunk_flags;
1843 			asoc->last_strm_seq_delivered = strmseq;
1844 			asoc->last_strm_no_delivered = strmno;
1845 			if (end) {
1846 				/* clean up the flags and such */
1847 				asoc->fragmented_delivery_inprogress = 0;
1848 				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1849 					asoc->strmin[strmno].last_sequence_delivered++;
1850 				}
1851 				stcb->asoc.control_pdapi = NULL;
1852 				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1853 					/*
1854 					 * There could be another message
1855 					 * ready
1856 					 */
1857 					need_reasm_check = 1;
1858 				}
1859 			}
1860 			control = NULL;
1861 			goto finish_express_del;
1862 		}
1863 	}
1864 failed_pdapi_express_del:
1865 	control = NULL;
1866 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1867 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1868 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1869 			asoc->highest_tsn_inside_nr_map = tsn;
1870 		}
1871 	} else {
1872 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1873 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1874 			asoc->highest_tsn_inside_map = tsn;
1875 		}
1876 	}
1877 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1878 		sctp_alloc_a_chunk(stcb, chk);
1879 		if (chk == NULL) {
1880 			/* No memory so we drop the chunk */
1881 			SCTP_STAT_INCR(sctps_nomem);
1882 			if (last_chunk == 0) {
1883 				/* we copied it, free the copy */
1884 				sctp_m_freem(dmbuf);
1885 			}
1886 			return (0);
1887 		}
1888 		chk->rec.data.TSN_seq = tsn;
1889 		chk->no_fr_allowed = 0;
1890 		chk->rec.data.stream_seq = strmseq;
1891 		chk->rec.data.stream_number = strmno;
1892 		chk->rec.data.payloadtype = protocol_id;
1893 		chk->rec.data.context = stcb->asoc.context;
1894 		chk->rec.data.doing_fast_retransmit = 0;
1895 		chk->rec.data.rcv_flags = chunk_flags;
1896 		chk->asoc = asoc;
1897 		chk->send_size = the_len;
1898 		chk->whoTo = net;
1899 		atomic_add_int(&net->ref_count, 1);
1900 		chk->data = dmbuf;
1901 	} else {
1902 		sctp_alloc_a_readq(stcb, control);
1903 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1904 		    protocol_id,
1905 		    stcb->asoc.context,
1906 		    strmno, strmseq,
1907 		    chunk_flags,
1908 		    dmbuf);
1909 		if (control == NULL) {
1910 			/* No memory so we drop the chunk */
1911 			SCTP_STAT_INCR(sctps_nomem);
1912 			if (last_chunk == 0) {
1913 				/* we copied it, free the copy */
1914 				sctp_m_freem(dmbuf);
1915 			}
1916 			return (0);
1917 		}
1918 		control->length = the_len;
1919 	}
1920 
1921 	/* Mark it as received */
1922 	/* Now queue it where it belongs */
1923 	if (control != NULL) {
1924 		/* First a sanity check */
1925 		if (asoc->fragmented_delivery_inprogress) {
1926 			/*
			 * Ok, we have a fragmented delivery in progress;
			 * if this chunk is next to deliver OR belongs, in
			 * our view, to the reassembly, the peer is evil
			 * or broken.
1931 			 */
1932 			uint32_t estimate_tsn;
1933 
1934 			estimate_tsn = asoc->tsn_last_delivered + 1;
1935 			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1936 			    (estimate_tsn == control->sinfo_tsn)) {
				/* Evil/Broken peer */
1938 				sctp_m_freem(control->data);
1939 				control->data = NULL;
1940 				if (control->whoFrom) {
1941 					sctp_free_remote_addr(control->whoFrom);
1942 					control->whoFrom = NULL;
1943 				}
1944 				sctp_free_a_readq(stcb, control);
1945 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1946 				    0, M_DONTWAIT, 1, MT_DATA);
1947 				if (oper) {
1948 					struct sctp_paramhdr *ph;
1949 					uint32_t *ippp;
1950 
1951 					SCTP_BUF_LEN(oper) =
1952 					    sizeof(struct sctp_paramhdr) +
1953 					    (3 * sizeof(uint32_t));
1954 					ph = mtod(oper, struct sctp_paramhdr *);
1955 					ph->param_type =
1956 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1957 					ph->param_length = htons(SCTP_BUF_LEN(oper));
1958 					ippp = (uint32_t *) (ph + 1);
1959 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1960 					ippp++;
1961 					*ippp = tsn;
1962 					ippp++;
1963 					*ippp = ((strmno << 16) | strmseq);
1964 				}
1965 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1966 				sctp_abort_an_association(stcb->sctp_ep, stcb,
1967 				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1968 
1969 				*abort_flag = 1;
1970 				return (0);
1971 			} else {
1972 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1973 					sctp_m_freem(control->data);
1974 					control->data = NULL;
1975 					if (control->whoFrom) {
1976 						sctp_free_remote_addr(control->whoFrom);
1977 						control->whoFrom = NULL;
1978 					}
1979 					sctp_free_a_readq(stcb, control);
1980 
1981 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1982 					    0, M_DONTWAIT, 1, MT_DATA);
1983 					if (oper) {
1984 						struct sctp_paramhdr *ph;
1985 						uint32_t *ippp;
1986 
1987 						SCTP_BUF_LEN(oper) =
1988 						    sizeof(struct sctp_paramhdr) +
1989 						    (3 * sizeof(uint32_t));
1990 						ph = mtod(oper,
1991 						    struct sctp_paramhdr *);
1992 						ph->param_type =
1993 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1994 						ph->param_length =
1995 						    htons(SCTP_BUF_LEN(oper));
1996 						ippp = (uint32_t *) (ph + 1);
1997 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1998 						ippp++;
1999 						*ippp = tsn;
2000 						ippp++;
2001 						*ippp = ((strmno << 16) | strmseq);
2002 					}
2003 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2004 					sctp_abort_an_association(stcb->sctp_ep,
2005 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2006 
2007 					*abort_flag = 1;
2008 					return (0);
2009 				}
2010 			}
2011 		} else {
2012 			/* No PDAPI running */
2013 			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
2014 				/*
				 * Reassembly queue is NOT empty; validate
				 * that this tsn does not need to be in the
				 * reassembly queue. If it does then our
				 * peer is broken or evil.
2019 				 */
2020 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2021 					sctp_m_freem(control->data);
2022 					control->data = NULL;
2023 					if (control->whoFrom) {
2024 						sctp_free_remote_addr(control->whoFrom);
2025 						control->whoFrom = NULL;
2026 					}
2027 					sctp_free_a_readq(stcb, control);
2028 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2029 					    0, M_DONTWAIT, 1, MT_DATA);
2030 					if (oper) {
2031 						struct sctp_paramhdr *ph;
2032 						uint32_t *ippp;
2033 
2034 						SCTP_BUF_LEN(oper) =
2035 						    sizeof(struct sctp_paramhdr) +
2036 						    (3 * sizeof(uint32_t));
2037 						ph = mtod(oper,
2038 						    struct sctp_paramhdr *);
2039 						ph->param_type =
2040 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2041 						ph->param_length =
2042 						    htons(SCTP_BUF_LEN(oper));
2043 						ippp = (uint32_t *) (ph + 1);
2044 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2045 						ippp++;
2046 						*ippp = tsn;
2047 						ippp++;
2048 						*ippp = ((strmno << 16) | strmseq);
2049 					}
2050 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2051 					sctp_abort_an_association(stcb->sctp_ep,
2052 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2053 
2054 					*abort_flag = 1;
2055 					return (0);
2056 				}
2057 			}
2058 		}
2059 		/* ok, if we reach here we have passed the sanity checks */
2060 		if (chunk_flags & SCTP_DATA_UNORDERED) {
2061 			/* queue directly into socket buffer */
2062 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2063 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2064 			    control,
2065 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2066 		} else {
2067 			/*
			 * Special check for when streams are resetting. We
			 * could be smarter about this and check the
			 * actual stream to see if it is not being reset..
			 * that way we would not create a HOLB when amongst
			 * streams being reset and those not being reset.
			 *
			 * We take complete messages that have a stream reset
			 * intervening (aka the TSN is after where our
			 * cum-ack needs to be) off and put them on a
			 * pending_reply_queue. The reassembly ones we do
			 * not have to worry about since they are all sorted
			 * and processed by TSN order. It is only the
			 * singletons I must worry about.
2081 			 */
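			/*
			 * For example (illustrative TSNs only): with a
			 * reset point at liste->tsn = 100 and the
			 * pending_reply_queue already holding TSNs 102 and
			 * 105, an arriving singleton with TSN 103 is
			 * inserted before 105, keeping the queue sorted so
			 * it can be drained in TSN order once the reset
			 * completes.
			 */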
2082 			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2083 			    ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)))
2084 			    ) {
2085 				/*
				 * yep it's past where we need to reset... go
2087 				 * ahead and queue it.
2088 				 */
2089 				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2090 					/* first one on */
2091 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2092 				} else {
2093 					struct sctp_queued_to_read *ctlOn;
2094 					unsigned char inserted = 0;
2095 
2096 					ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
2097 					while (ctlOn) {
2098 						if (compare_with_wrap(control->sinfo_tsn,
2099 						    ctlOn->sinfo_tsn, MAX_TSN)) {
2100 							ctlOn = TAILQ_NEXT(ctlOn, next);
2101 						} else {
2102 							/* found it */
2103 							TAILQ_INSERT_BEFORE(ctlOn, control, next);
2104 							inserted = 1;
2105 							break;
2106 						}
2107 					}
2108 					if (inserted == 0) {
2109 						/*
					 * must be put at the end;
					 * the loop ran off the
					 * tail without finding a
					 * larger TSN.
2113 						 */
2114 						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2115 					}
2116 				}
2117 			} else {
2118 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2119 				if (*abort_flag) {
2120 					return (0);
2121 				}
2122 			}
2123 		}
2124 	} else {
2125 		/* Into the re-assembly queue */
2126 		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2127 		if (*abort_flag) {
2128 			/*
2129 			 * the assoc is now gone and chk was put onto the
2130 			 * reasm queue, which has all been freed.
2131 			 */
2132 			*m = NULL;
2133 			return (0);
2134 		}
2135 	}
2136 finish_express_del:
2137 	if (tsn == (asoc->cumulative_tsn + 1)) {
2138 		/* Update cum-ack */
2139 		asoc->cumulative_tsn = tsn;
2140 	}
2141 	if (last_chunk) {
2142 		*m = NULL;
2143 	}
2144 	if (ordered) {
2145 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2146 	} else {
2147 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2148 	}
2149 	SCTP_STAT_INCR(sctps_recvdata);
2150 	/* Set it present please */
2151 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2152 		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2153 	}
2154 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2155 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2156 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2157 	}
2158 	/* check the special flag for stream resets */
2159 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2160 	    ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
2161 	    (asoc->cumulative_tsn == liste->tsn))
2162 	    ) {
2163 		/*
		 * we have finished working through the backlogged TSNs;
		 * now it is time to reset streams. 1: call reset function.
		 * 2: free pending_reply space. 3: distribute any chunks in
		 * the pending_reply_queue.
2168 		 */
2169 		struct sctp_queued_to_read *ctl;
2170 
2171 		sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2172 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2173 		SCTP_FREE(liste, SCTP_M_STRESET);
2174 		/* sa_ignore FREED_MEMORY */
2175 		liste = TAILQ_FIRST(&asoc->resetHead);
2176 		ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2177 		if (ctl && (liste == NULL)) {
2178 			/* All can be removed */
2179 			while (ctl) {
2180 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2181 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2182 				if (*abort_flag) {
2183 					return (0);
2184 				}
2185 				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2186 			}
2187 		} else if (ctl) {
2188 			/* more than one in queue */
2189 			while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
2190 				/*
				 * if ctl->sinfo_tsn is <= liste->tsn we
				 * can process it, which is the NOT of
				 * ctl->sinfo_tsn > liste->tsn
2194 				 */
2195 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2196 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2197 				if (*abort_flag) {
2198 					return (0);
2199 				}
2200 				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2201 			}
2202 		}
2203 		/*
		 * Now service re-assembly to pick up anything that has
		 * been held on the reassembly queue.
2206 		 */
2207 		sctp_deliver_reasm_check(stcb, asoc);
2208 		need_reasm_check = 0;
2209 	}
2210 	if (need_reasm_check) {
2211 		/* Another one waits ? */
2212 		sctp_deliver_reasm_check(stcb, asoc);
2213 	}
2214 	return (1);
2215 }
2216 
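/*
 * sctp_map_lookup_tab[val] gives the number of consecutive 1-bits in val
 * counting up from bit 0: for example tab[0x07] == 3, tab[0x0b] == 2
 * (bits 0 and 1 are set, bit 2 is clear) and tab[0xff] == 8.
 * sctp_slide_mapping_arrays() below uses it to see how far the cum-ack
 * can advance within a partially filled byte of the mapping array. A
 * minimal sketch of the equivalent computation (illustration only, not
 * used here):
 *
 *	int n = 0;
 *	while ((val & (1 << n)) && (n < 8))
 *		n++;
 */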
2217 int8_t sctp_map_lookup_tab[256] = {
2218 	0, 1, 0, 2, 0, 1, 0, 3,
2219 	0, 1, 0, 2, 0, 1, 0, 4,
2220 	0, 1, 0, 2, 0, 1, 0, 3,
2221 	0, 1, 0, 2, 0, 1, 0, 5,
2222 	0, 1, 0, 2, 0, 1, 0, 3,
2223 	0, 1, 0, 2, 0, 1, 0, 4,
2224 	0, 1, 0, 2, 0, 1, 0, 3,
2225 	0, 1, 0, 2, 0, 1, 0, 6,
2226 	0, 1, 0, 2, 0, 1, 0, 3,
2227 	0, 1, 0, 2, 0, 1, 0, 4,
2228 	0, 1, 0, 2, 0, 1, 0, 3,
2229 	0, 1, 0, 2, 0, 1, 0, 5,
2230 	0, 1, 0, 2, 0, 1, 0, 3,
2231 	0, 1, 0, 2, 0, 1, 0, 4,
2232 	0, 1, 0, 2, 0, 1, 0, 3,
2233 	0, 1, 0, 2, 0, 1, 0, 7,
2234 	0, 1, 0, 2, 0, 1, 0, 3,
2235 	0, 1, 0, 2, 0, 1, 0, 4,
2236 	0, 1, 0, 2, 0, 1, 0, 3,
2237 	0, 1, 0, 2, 0, 1, 0, 5,
2238 	0, 1, 0, 2, 0, 1, 0, 3,
2239 	0, 1, 0, 2, 0, 1, 0, 4,
2240 	0, 1, 0, 2, 0, 1, 0, 3,
2241 	0, 1, 0, 2, 0, 1, 0, 6,
2242 	0, 1, 0, 2, 0, 1, 0, 3,
2243 	0, 1, 0, 2, 0, 1, 0, 4,
2244 	0, 1, 0, 2, 0, 1, 0, 3,
2245 	0, 1, 0, 2, 0, 1, 0, 5,
2246 	0, 1, 0, 2, 0, 1, 0, 3,
2247 	0, 1, 0, 2, 0, 1, 0, 4,
2248 	0, 1, 0, 2, 0, 1, 0, 3,
2249 	0, 1, 0, 2, 0, 1, 0, 8
2250 };
2251 
2252 
2253 void
2254 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2255 {
2256 	/*
2257 	 * Now we also need to check the mapping array in a couple of ways.
2258 	 * 1) Did we move the cum-ack point?
2259 	 *
	 * When you first glance at this you might think that all entries
	 * that make up the position of the cum-ack would be in the
	 * nr-mapping array only.. i.e. things up to the cum-ack are always
	 * deliverable. That's true with one exception: when it's a
	 * fragmented message we may not deliver the data until some
	 * threshold (or all of it) is in place. So we must OR the
	 * nr_mapping_array and mapping_array to get a true picture of the
	 * cum-ack.
2267 	 */
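	/*
	 * A small worked example: if the base byte has
	 * nr_mapping_array[0] = 0x07 (three TSNs already delivered) and
	 * mapping_array[0] = 0x18 (two fragments still held for
	 * reassembly), the OR is 0x1f and sctp_map_lookup_tab[0x1f] == 5,
	 * so the cum-ack may advance five TSNs past the base even though
	 * two of them have not reached the application yet.
	 */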
2268 	struct sctp_association *asoc;
2269 	int at;
2270 	uint8_t val;
2271 	int slide_from, slide_end, lgap, distance;
2272 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2273 
2274 	asoc = &stcb->asoc;
2275 	at = 0;
2276 
2277 	old_cumack = asoc->cumulative_tsn;
2278 	old_base = asoc->mapping_array_base_tsn;
2279 	old_highest = asoc->highest_tsn_inside_map;
2280 	/*
2281 	 * We could probably improve this a small bit by calculating the
2282 	 * offset of the current cum-ack as the starting point.
2283 	 */
2284 	at = 0;
2285 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2286 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2287 		if (val == 0xff) {
2288 			at += 8;
2289 		} else {
2290 			/* there is a 0 bit */
2291 			at += sctp_map_lookup_tab[val];
2292 			break;
2293 		}
2294 	}
2295 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2296 
2297 	if (compare_with_wrap(asoc->cumulative_tsn, asoc->highest_tsn_inside_map, MAX_TSN) &&
2298 	    compare_with_wrap(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
2299 #ifdef INVARIANTS
2300 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2301 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2302 #else
2303 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2304 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2305 		sctp_print_mapping_array(asoc);
2306 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2307 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2308 		}
2309 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2310 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2311 #endif
2312 	}
2313 	if (compare_with_wrap(asoc->highest_tsn_inside_nr_map,
2314 	    asoc->highest_tsn_inside_map,
2315 	    MAX_TSN)) {
2316 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2317 	} else {
2318 		highest_tsn = asoc->highest_tsn_inside_map;
2319 	}
2320 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2321 		/* The complete array was completed by a single FR */
2322 		/* highest becomes the cum-ack */
2323 		int clr;
2324 
2325 #ifdef INVARIANTS
2326 		unsigned int i;
2327 
2328 #endif
2329 
2330 		/* clear the array */
2331 		clr = ((at + 7) >> 3);
2332 		if (clr > asoc->mapping_array_size) {
2333 			clr = asoc->mapping_array_size;
2334 		}
2335 		memset(asoc->mapping_array, 0, clr);
2336 		memset(asoc->nr_mapping_array, 0, clr);
2337 #ifdef INVARIANTS
2338 		for (i = 0; i < asoc->mapping_array_size; i++) {
2339 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2340 				printf("Error Mapping array's not clean at clear\n");
2341 				sctp_print_mapping_array(asoc);
2342 			}
2343 		}
2344 #endif
2345 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2346 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2347 	} else if (at >= 8) {
2348 		/* we can slide the mapping array down */
2349 		/* slide_from holds where we hit the first NON 0xff byte */
2350 
2351 		/*
2352 		 * now calculate the ceiling of the move using our highest
2353 		 * TSN value
2354 		 */
2355 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2356 		slide_end = (lgap >> 3);
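		/*
		 * For example: with mapping_array_base_tsn = 1000 and
		 * highest_tsn = 1099, lgap is 99 and slide_end is 12, the
		 * index of the last byte that still holds a marked TSN
		 * and hence the ceiling of the move.
		 */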
2357 		if (slide_end < slide_from) {
2358 			sctp_print_mapping_array(asoc);
2359 #ifdef INVARIANTS
2360 			panic("impossible slide");
2361 #else
2362 			printf("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2363 			    lgap, slide_end, slide_from, at);
2364 			return;
2365 #endif
2366 		}
2367 		if (slide_end > asoc->mapping_array_size) {
2368 #ifdef INVARIANTS
2369 			panic("would overrun buffer");
2370 #else
2371 			printf("Gak, would have overrun map end:%d slide_end:%d\n",
2372 			    asoc->mapping_array_size, slide_end);
2373 			slide_end = asoc->mapping_array_size;
2374 #endif
2375 		}
2376 		distance = (slide_end - slide_from) + 1;
2377 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2378 			sctp_log_map(old_base, old_cumack, old_highest,
2379 			    SCTP_MAP_PREPARE_SLIDE);
2380 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2381 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2382 		}
2383 		if (distance + slide_from > asoc->mapping_array_size ||
2384 		    distance < 0) {
2385 			/*
2386 			 * Here we do NOT slide forward the array so that
2387 			 * hopefully when more data comes in to fill it up
2388 			 * we will be able to slide it forward. Really I
2389 			 * don't think this should happen :-0
2390 			 */
2391 
2392 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2393 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2394 				    (uint32_t) asoc->mapping_array_size,
2395 				    SCTP_MAP_SLIDE_NONE);
2396 			}
2397 		} else {
2398 			int ii;
2399 
2400 			for (ii = 0; ii < distance; ii++) {
2401 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2402 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2403 
2404 			}
2405 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2406 				asoc->mapping_array[ii] = 0;
2407 				asoc->nr_mapping_array[ii] = 0;
2408 			}
2409 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2410 				asoc->highest_tsn_inside_map += (slide_from << 3);
2411 			}
2412 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2413 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2414 			}
2415 			asoc->mapping_array_base_tsn += (slide_from << 3);
2416 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2417 				sctp_log_map(asoc->mapping_array_base_tsn,
2418 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2419 				    SCTP_MAP_SLIDE_RESULT);
2420 			}
2421 		}
2422 	}
2423 }
2424 
2425 
2426 void
2427 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap, int *abort_flag)
2428 {
2429 	struct sctp_association *asoc;
2430 	uint32_t highest_tsn;
2431 
2432 	asoc = &stcb->asoc;
2433 	if (compare_with_wrap(asoc->highest_tsn_inside_nr_map,
2434 	    asoc->highest_tsn_inside_map,
2435 	    MAX_TSN)) {
2436 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2437 	} else {
2438 		highest_tsn = asoc->highest_tsn_inside_map;
2439 	}
2440 
2441 	/*
2442 	 * Now we need to see if we need to queue a sack or just start the
2443 	 * timer (if allowed).
2444 	 */
2445 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2446 		/*
		 * Ok, special case: in the SHUTDOWN-SENT state we make
		 * sure the SACK timer is off and instead send a SHUTDOWN
		 * and a SACK.
2450 		 */
2451 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2452 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2453 			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2454 		}
2455 		sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2456 		sctp_send_sack(stcb);
2457 	} else {
2458 		int is_a_gap;
2459 
2460 		/* is there a gap now ? */
2461 		is_a_gap = compare_with_wrap(highest_tsn, stcb->asoc.cumulative_tsn, MAX_TSN);
2462 
2463 		/*
2464 		 * CMT DAC algorithm: increase number of packets received
2465 		 * since last ack
2466 		 */
2467 		stcb->asoc.cmt_dac_pkts_rcvd++;
2468 
2469 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2470 							 * SACK */
2471 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2472 							 * longer is one */
2473 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2474 		    (is_a_gap) ||	/* is still a gap */
2475 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2476 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2477 		    ) {
2478 
2479 			if ((stcb->asoc.sctp_cmt_on_off == 1) &&
2480 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2481 			    (stcb->asoc.send_sack == 0) &&
2482 			    (stcb->asoc.numduptsns == 0) &&
2483 			    (stcb->asoc.delayed_ack) &&
2484 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2485 
2486 				/*
				 * CMT DAC algorithm: With CMT, delay acks
				 * even in the face of reordering.
				 * Therefore, acks that do not have to be
				 * sent for the above reasons will be
				 * delayed. That is, acks that would have
				 * been sent due to gap reports will be
				 * delayed with DAC. Start the delayed ack
				 * timer.
2496 				 */
2497 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2498 				    stcb->sctp_ep, stcb, NULL);
2499 			} else {
2500 				/*
2501 				 * Ok we must build a SACK since the timer
2502 				 * is pending, we got our first packet OR
2503 				 * there are gaps or duplicates.
2504 				 */
2505 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2506 				sctp_send_sack(stcb);
2507 			}
2508 		} else {
2509 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2510 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2511 				    stcb->sctp_ep, stcb, NULL);
2512 			}
2513 		}
2514 	}
2515 }
2516 
2517 void
2518 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2519 {
2520 	struct sctp_tmit_chunk *chk;
2521 	uint32_t tsize, pd_point;
2522 	uint16_t nxt_todel;
2523 
2524 	if (asoc->fragmented_delivery_inprogress) {
2525 		sctp_service_reassembly(stcb, asoc);
2526 	}
	/* Can we proceed further, i.e. is the PD-API complete? */
2528 	if (asoc->fragmented_delivery_inprogress) {
2529 		/* no */
2530 		return;
2531 	}
2532 	/*
	 * Now, is there some other chunk I can deliver from the
	 * reassembly queue?
2535 	 */
2536 doit_again:
2537 	chk = TAILQ_FIRST(&asoc->reasmqueue);
2538 	if (chk == NULL) {
2539 		asoc->size_on_reasm_queue = 0;
2540 		asoc->cnt_on_reasm_queue = 0;
2541 		return;
2542 	}
2543 	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2544 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2545 	    ((nxt_todel == chk->rec.data.stream_seq) ||
2546 	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2547 		/*
		 * Yep, the first one is here. We set up to start reception
		 * by backing down the TSN, just in case we can't deliver.
2550 		 */
2551 
2552 		/*
		 * Before we start, though, either all of the message
		 * should be here, or at least enough of it to reach the
		 * partial delivery point computed below.
2556 		 */
2557 		if (stcb->sctp_socket) {
2558 			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
2559 			    stcb->sctp_ep->partial_delivery_point);
2560 		} else {
2561 			pd_point = stcb->sctp_ep->partial_delivery_point;
2562 		}
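		/*
		 * For illustration (example values only): with a socket
		 * receive limit of 64k and a partial_delivery_point of
		 * 32k, pd_point is 32k, so delivery of a partly
		 * reassembled message starts once 32k of it is queued, or
		 * sooner if the whole message has arrived.
		 */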
2563 		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2564 			asoc->fragmented_delivery_inprogress = 1;
2565 			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2566 			asoc->str_of_pdapi = chk->rec.data.stream_number;
2567 			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2568 			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2569 			asoc->fragment_flags = chk->rec.data.rcv_flags;
2570 			sctp_service_reassembly(stcb, asoc);
2571 			if (asoc->fragmented_delivery_inprogress == 0) {
2572 				goto doit_again;
2573 			}
2574 		}
2575 	}
2576 }
2577 
2578 int
2579 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2580     struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2581     struct sctp_nets *net, uint32_t * high_tsn)
2582 {
2583 	struct sctp_data_chunk *ch, chunk_buf;
2584 	struct sctp_association *asoc;
	int num_chunks = 0;	/* number of DATA chunks processed */
2586 	int stop_proc = 0;
2587 	int chk_length, break_flag, last_chunk;
2588 	int abort_flag = 0, was_a_gap = 0;
2589 	struct mbuf *m;
2590 
2591 	/* set the rwnd */
2592 	sctp_set_rwnd(stcb, &stcb->asoc);
2593 
2594 	m = *mm;
2595 	SCTP_TCB_LOCK_ASSERT(stcb);
2596 	asoc = &stcb->asoc;
2597 	if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2598 	    stcb->asoc.cumulative_tsn, MAX_TSN)) {
2599 		/* there was a gap before this data was processed */
2600 		was_a_gap = 1;
2601 	}
2602 	/*
2603 	 * setup where we got the last DATA packet from for any SACK that
2604 	 * may need to go out. Don't bump the net. This is done ONLY when a
2605 	 * chunk is assigned.
2606 	 */
2607 	asoc->last_data_chunk_from = net;
2608 
2609 	/*-
2610 	 * Now before we proceed we must figure out if this is a wasted
2611 	 * cluster... i.e. it is a small packet sent in and yet the driver
2612 	 * underneath allocated a full cluster for it. If so we must copy it
2613 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2614 	 * with cluster starvation. Note for __Panda__ we don't do this
2615 	 * since it has clusters all the way down to 64 bytes.
2616 	 */
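	/*
	 * E.g. a packet of a few dozen bytes that arrived in a full-size
	 * cluster is copied below into a plain mbuf (at most MLEN bytes
	 * of storage) and the cluster is freed back to its pool.
	 */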
2617 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2618 		/* we only handle mbufs that are singletons.. not chains */
2619 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2620 		if (m) {
			/* ok let's see if we can copy the data up */
2622 			caddr_t *from, *to;
2623 
2624 			/* get the pointers and copy */
2625 			to = mtod(m, caddr_t *);
2626 			from = mtod((*mm), caddr_t *);
2627 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2628 			/* copy the length and free up the old */
2629 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2630 			sctp_m_freem(*mm);
			/* success, back copy */
2632 			*mm = m;
2633 		} else {
2634 			/* We are in trouble in the mbuf world .. yikes */
2635 			m = *mm;
2636 		}
2637 	}
2638 	/* get pointer to the first chunk header */
2639 	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2640 	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2641 	if (ch == NULL) {
2642 		return (1);
2643 	}
2644 	/*
2645 	 * process all DATA chunks...
2646 	 */
2647 	*high_tsn = asoc->cumulative_tsn;
2648 	break_flag = 0;
2649 	asoc->data_pkts_seen++;
2650 	while (stop_proc == 0) {
2651 		/* validate chunk length */
2652 		chk_length = ntohs(ch->ch.chunk_length);
2653 		if (length - *offset < chk_length) {
			/* all done, mutilated chunk */
2655 			stop_proc = 1;
2656 			break;
2657 		}
2658 		if (ch->ch.chunk_type == SCTP_DATA) {
2659 			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2660 				/*
				 * Need to send an abort since we had an
				 * invalid data chunk.
2663 				 */
2664 				struct mbuf *op_err;
2665 
2666 				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2667 				    0, M_DONTWAIT, 1, MT_DATA);
2668 
2669 				if (op_err) {
2670 					struct sctp_paramhdr *ph;
2671 					uint32_t *ippp;
2672 
2673 					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2674 					    (2 * sizeof(uint32_t));
2675 					ph = mtod(op_err, struct sctp_paramhdr *);
2676 					ph->param_type =
2677 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2678 					ph->param_length = htons(SCTP_BUF_LEN(op_err));
2679 					ippp = (uint32_t *) (ph + 1);
2680 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2681 					ippp++;
2682 					*ippp = asoc->cumulative_tsn;
2683 
2684 				}
2685 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2686 				sctp_abort_association(inp, stcb, m, iphlen, sh,
2687 				    op_err, 0, net->port);
2688 				return (2);
2689 			}
2690 #ifdef SCTP_AUDITING_ENABLED
2691 			sctp_audit_log(0xB1, 0);
2692 #endif
2693 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2694 				last_chunk = 1;
2695 			} else {
2696 				last_chunk = 0;
2697 			}
2698 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2699 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2700 			    last_chunk)) {
2701 				num_chunks++;
2702 			}
2703 			if (abort_flag)
2704 				return (2);
2705 
2706 			if (break_flag) {
2707 				/*
2708 				 * Set because of out of rwnd space and no
2709 				 * drop rep space left.
2710 				 */
2711 				stop_proc = 1;
2712 				break;
2713 			}
2714 		} else {
2715 			/* not a data chunk in the data region */
2716 			switch (ch->ch.chunk_type) {
2717 			case SCTP_INITIATION:
2718 			case SCTP_INITIATION_ACK:
2719 			case SCTP_SELECTIVE_ACK:
2720 			case SCTP_NR_SELECTIVE_ACK:	/* EY */
2721 			case SCTP_HEARTBEAT_REQUEST:
2722 			case SCTP_HEARTBEAT_ACK:
2723 			case SCTP_ABORT_ASSOCIATION:
2724 			case SCTP_SHUTDOWN:
2725 			case SCTP_SHUTDOWN_ACK:
2726 			case SCTP_OPERATION_ERROR:
2727 			case SCTP_COOKIE_ECHO:
2728 			case SCTP_COOKIE_ACK:
2729 			case SCTP_ECN_ECHO:
2730 			case SCTP_ECN_CWR:
2731 			case SCTP_SHUTDOWN_COMPLETE:
2732 			case SCTP_AUTHENTICATION:
2733 			case SCTP_ASCONF_ACK:
2734 			case SCTP_PACKET_DROPPED:
2735 			case SCTP_STREAM_RESET:
2736 			case SCTP_FORWARD_CUM_TSN:
2737 			case SCTP_ASCONF:
2738 				/*
2739 				 * Now, what do we do with KNOWN chunks that
2740 				 * are NOT in the right place?
2741 				 *
2742 				 * For now, I do nothing but ignore them. We
2743 				 * may later want to add sysctl stuff to
2744 				 * switch out and do either an ABORT() or
2745 				 * possibly process them.
2746 				 */
2747 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2748 					struct mbuf *op_err;
2749 
2750 					op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2751 					sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
2752 					return (2);
2753 				}
2754 				break;
2755 			default:
2756 				/* unknown chunk type, use bit rules */
2757 				if (ch->ch.chunk_type & 0x40) {
					/* Add an error report to the queue */
2759 					struct mbuf *merr;
2760 					struct sctp_paramhdr *phd;
2761 
2762 					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2763 					if (merr) {
2764 						phd = mtod(merr, struct sctp_paramhdr *);
2765 						/*
2766 						 * We cheat and use param
2767 						 * type since we did not
						 * bother to define an error
2769 						 * cause struct. They are
2770 						 * the same basic format
2771 						 * with different names.
2772 						 */
2773 						phd->param_type =
2774 						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
2775 						phd->param_length =
2776 						    htons(chk_length + sizeof(*phd));
2777 						SCTP_BUF_LEN(merr) = sizeof(*phd);
2778 						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
2779 						    SCTP_SIZE32(chk_length),
2780 						    M_DONTWAIT);
2781 						if (SCTP_BUF_NEXT(merr)) {
2782 							sctp_queue_op_err(stcb, merr);
2783 						} else {
2784 							sctp_m_freem(merr);
2785 						}
2786 					}
2787 				}
2788 				if ((ch->ch.chunk_type & 0x80) == 0) {
2789 					/* discard the rest of this packet */
2790 					stop_proc = 1;
2791 				}	/* else skip this bad chunk and
2792 					 * continue... */
2793 				break;
2794 			};	/* switch of chunk type */
2795 		}
2796 		*offset += SCTP_SIZE32(chk_length);
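		/*
		 * Chunks are padded to 32-bit boundaries, so the offset
		 * advances by the rounded-up length; e.g. a chunk with
		 * chk_length 37 moves the offset forward 40 bytes to the
		 * next chunk header.
		 */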
2797 		if ((*offset >= length) || stop_proc) {
2798 			/* no more data left in the mbuf chain */
2799 			stop_proc = 1;
2800 			continue;
2801 		}
2802 		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2803 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2804 		if (ch == NULL) {
2805 			*offset = length;
2806 			stop_proc = 1;
2807 			break;
2808 
2809 		}
2810 	}			/* while */
2811 	if (break_flag) {
2812 		/*
2813 		 * we need to report rwnd overrun drops.
2814 		 */
2815 		sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2816 	}
2817 	if (num_chunks) {
2818 		/*
		 * Did we get data? If so, update the time for auto-close
		 * and give the peer credit for being alive.
2821 		 */
2822 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2823 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2824 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2825 			    stcb->asoc.overall_error_count,
2826 			    0,
2827 			    SCTP_FROM_SCTP_INDATA,
2828 			    __LINE__);
2829 		}
2830 		stcb->asoc.overall_error_count = 0;
2831 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2832 	}
2833 	/* now service all of the reassm queue if needed */
2834 	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2835 		sctp_service_queues(stcb, asoc);
2836 
2837 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2838 		/* Assure that we ack right away */
2839 		stcb->asoc.send_sack = 1;
2840 	}
2841 	/* Start a sack timer or QUEUE a SACK for sending */
2842 	sctp_sack_check(stcb, was_a_gap, &abort_flag);
2843 	if (abort_flag)
2844 		return (2);
2845 
2846 	return (0);
2847 }
2848 
2849 static int
2850 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2851     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2852     int *num_frs,
2853     uint32_t * biggest_newly_acked_tsn,
2854     uint32_t * this_sack_lowest_newack,
2855     int *ecn_seg_sums)
2856 {
2857 	struct sctp_tmit_chunk *tp1;
2858 	unsigned int theTSN;
2859 	int j, wake_him = 0, circled = 0;
2860 
2861 	/* Recover the tp1 we last saw */
2862 	tp1 = *p_tp1;
2863 	if (tp1 == NULL) {
2864 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2865 	}
2866 	for (j = frag_strt; j <= frag_end; j++) {
2867 		theTSN = j + last_tsn;
2868 		while (tp1) {
2869 			if (tp1->rec.data.doing_fast_retransmit)
2870 				(*num_frs) += 1;
2871 
2872 			/*-
2873 			 * CMT: CUCv2 algorithm. For each TSN being
2874 			 * processed from the sent queue, track the
2875 			 * next expected pseudo-cumack, or
2876 			 * rtx_pseudo_cumack, if required. Separate
2877 			 * cumack trackers for first transmissions,
2878 			 * and retransmissions.
2879 			 */
2880 			if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2881 			    (tp1->snd_count == 1)) {
2882 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2883 				tp1->whoTo->find_pseudo_cumack = 0;
2884 			}
2885 			if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2886 			    (tp1->snd_count > 1)) {
2887 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2888 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2889 			}
2890 			if (tp1->rec.data.TSN_seq == theTSN) {
2891 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2892 					/*-
2893 					 * must be held until
2894 					 * cum-ack passes
2895 					 */
2896 					/*-
2897 					 * ECN Nonce: Add the nonce
2898 					 * value to the sender's
2899 					 * nonce sum
2900 					 */
2901 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2902 						/*-
2903 						 * If it is less than RESEND, it is
2904 						 * now no-longer in flight.
2905 						 * Higher values may already be set
2906 						 * via previous Gap Ack Blocks...
2907 						 * i.e. ACKED or RESEND.
2908 						 */
2909 						if (compare_with_wrap(tp1->rec.data.TSN_seq,
2910 						    *biggest_newly_acked_tsn, MAX_TSN)) {
2911 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2912 						}
2913 						/*-
2914 						 * CMT: SFR algo (and HTNA) - set
2915 						 * saw_newack to 1 for dest being
2916 						 * newly acked. update
2917 						 * this_sack_highest_newack if
2918 						 * appropriate.
2919 						 */
2920 						if (tp1->rec.data.chunk_was_revoked == 0)
2921 							tp1->whoTo->saw_newack = 1;
2922 
2923 						if (compare_with_wrap(tp1->rec.data.TSN_seq,
2924 						    tp1->whoTo->this_sack_highest_newack,
2925 						    MAX_TSN)) {
2926 							tp1->whoTo->this_sack_highest_newack =
2927 							    tp1->rec.data.TSN_seq;
2928 						}
2929 						/*-
2930 						 * CMT DAC algo: also update
2931 						 * this_sack_lowest_newack
2932 						 */
2933 						if (*this_sack_lowest_newack == 0) {
2934 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2935 								sctp_log_sack(*this_sack_lowest_newack,
2936 								    last_tsn,
2937 								    tp1->rec.data.TSN_seq,
2938 								    0,
2939 								    0,
2940 								    SCTP_LOG_TSN_ACKED);
2941 							}
2942 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2943 						}
2944 						/*-
2945 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2946 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2947 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2948 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2949 						 * Separate pseudo_cumack trackers for first transmissions and
2950 						 * retransmissions.
2951 						 */
2952 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2953 							if (tp1->rec.data.chunk_was_revoked == 0) {
2954 								tp1->whoTo->new_pseudo_cumack = 1;
2955 							}
2956 							tp1->whoTo->find_pseudo_cumack = 1;
2957 						}
2958 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2959 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2960 						}
2961 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2962 							if (tp1->rec.data.chunk_was_revoked == 0) {
2963 								tp1->whoTo->new_pseudo_cumack = 1;
2964 							}
2965 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2966 						}
2967 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2968 							sctp_log_sack(*biggest_newly_acked_tsn,
2969 							    last_tsn,
2970 							    tp1->rec.data.TSN_seq,
2971 							    frag_strt,
2972 							    frag_end,
2973 							    SCTP_LOG_TSN_ACKED);
2974 						}
2975 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2976 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2977 							    tp1->whoTo->flight_size,
2978 							    tp1->book_size,
2979 							    (uintptr_t) tp1->whoTo,
2980 							    tp1->rec.data.TSN_seq);
2981 						}
2982 						sctp_flight_size_decrease(tp1);
2983 						sctp_total_flight_decrease(stcb, tp1);
2984 
2985 						tp1->whoTo->net_ack += tp1->send_size;
2986 						if (tp1->snd_count < 2) {
2987 							/*-
							 * True non-retransmitted chunk
2989 							 */
2990 							tp1->whoTo->net_ack2 += tp1->send_size;
2991 
2992 							/*-
2993 							 * update RTO too ?
2994 							 */
2995 							if (tp1->do_rtt) {
2996 								tp1->whoTo->RTO =
2997 								    sctp_calculate_rto(stcb,
2998 								    &stcb->asoc,
2999 								    tp1->whoTo,
3000 								    &tp1->sent_rcv_time,
3001 								    sctp_align_safe_nocopy);
3002 								tp1->do_rtt = 0;
3003 							}
3004 						}
3005 					}
3006 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3007 						(*ecn_seg_sums) += tp1->rec.data.ect_nonce;
3008 						(*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
3009 						if (compare_with_wrap(tp1->rec.data.TSN_seq,
3010 						    stcb->asoc.this_sack_highest_gap,
3011 						    MAX_TSN)) {
3012 							stcb->asoc.this_sack_highest_gap =
3013 							    tp1->rec.data.TSN_seq;
3014 						}
3015 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3016 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3017 #ifdef SCTP_AUDITING_ENABLED
3018 							sctp_audit_log(0xB2,
3019 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3020 #endif
3021 						}
3022 					}
3023 					/*-
3024 					 * All chunks NOT UNSENT fall through here and are marked
3025 					 * (leave PR-SCTP ones that are to skip alone though)
3026 					 */
3027 					if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
3028 						tp1->sent = SCTP_DATAGRAM_MARKED;
3029 
3030 					if (tp1->rec.data.chunk_was_revoked) {
3031 						/* deflate the cwnd */
3032 						tp1->whoTo->cwnd -= tp1->book_size;
3033 						tp1->rec.data.chunk_was_revoked = 0;
3034 					}
3035 					/* NR Sack code here */
3036 					if (nr_sacking) {
3037 						if (tp1->data) {
3038 							/*
3039 							 * sa_ignore
3040 							 * NO_NULL_CHK
3041 							 */
3042 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3043 							sctp_m_freem(tp1->data);
3044 							tp1->data = NULL;
3045 						}
3046 						wake_him++;
3047 					}
3048 				}
3049 				break;
3050 			}	/* if (tp1->TSN_seq == theTSN) */
3051 			if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
3052 			    MAX_TSN))
3053 				break;
3054 
3055 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3056 			if ((tp1 == NULL) && (circled == 0)) {
3057 				circled++;
3058 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3059 			}
3060 		}		/* end while (tp1) */
3061 		if (tp1 == NULL) {
3062 			circled = 0;
3063 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3064 		}
3065 		/* In case the fragments were not in order we must reset */
3066 	}			/* end for (j = fragStart */
3067 	*p_tp1 = tp1;
3068 	return (wake_him);	/* Return value only used for nr-sack */
3069 }
3070 
3071 
3072 static int
3073 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3074     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3075     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3076     int num_seg, int num_nr_seg, int *ecn_seg_sums)
3077 {
3078 	struct sctp_gap_ack_block *frag, block;
3079 	struct sctp_tmit_chunk *tp1;
3080 	int i;
3081 	int num_frs = 0;
3082 	int chunk_freed;
3083 	int non_revocable;
3084 	uint16_t frag_strt, frag_end;
3085 	uint32_t last_frag_high;
3086 
3087 	tp1 = NULL;
3088 	last_frag_high = 0;
3089 	chunk_freed = 0;
3090 
3091 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3092 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3093 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3094 		*offset += sizeof(block);
3095 		if (frag == NULL) {
3096 			return (chunk_freed);
3097 		}
3098 		frag_strt = ntohs(frag->start);
3099 		frag_end = ntohs(frag->end);
3100 		/* some sanity checks on the fragment offsets */
3101 		if (frag_strt > frag_end) {
3102 			/* this one is malformed, skip */
3103 			continue;
3104 		}
3105 		if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
3106 		    MAX_TSN))
3107 			*biggest_tsn_acked = frag_end + last_tsn;
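		/*
		 * Gap ack block bounds are offsets from the cumulative
		 * TSN ack; e.g. with last_tsn = 1000, a block of
		 * [frag_strt 2, frag_end 4] acks TSNs 1002 through 1004.
		 */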
3108 
		/* mark acked dgs and find out the highest TSN being acked */
3110 		if (tp1 == NULL) {
3111 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3112 			/* save the locations of the last frags */
3113 			last_frag_high = frag_end + last_tsn;
3114 		} else {
3115 			/*
			 * now let's see if we need to reset the queue due
			 * to an out-of-order SACK fragment
3118 			 */
3119 			if (compare_with_wrap(frag_strt + last_tsn,
3120 			    last_frag_high, MAX_TSN)) {
3121 				/*
3122 				 * if the new frag starts after the last TSN
3123 				 * frag covered, we are ok and this one is
3124 				 * beyond the last one
3125 				 */
3126 				;
3127 			} else {
3128 				/*
				 * ok, they have reset us, so we need to
				 * reset the queue. This will cause extra
				 * hunting, but hey, they chose the
				 * performance hit when they failed to
				 * order their gaps.
3134 				 */
3135 				tp1 = TAILQ_FIRST(&asoc->sent_queue);
3136 			}
3137 			last_frag_high = frag_end + last_tsn;
3138 		}
3139 		if (i < num_seg) {
3140 			non_revocable = 0;
3141 		} else {
3142 			non_revocable = 1;
3143 		}
3144 		if (i == num_seg) {
3145 			tp1 = NULL;
3146 		}
3147 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3148 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3149 		    this_sack_lowest_newack, ecn_seg_sums)) {
3150 			chunk_freed = 1;
3151 		}
3152 	}
3153 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3154 		if (num_frs)
3155 			sctp_log_fr(*biggest_tsn_acked,
3156 			    *biggest_newly_acked_tsn,
3157 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3158 	}
3159 	return (chunk_freed);
3160 }
3161 
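/*
 * A TSN that a previous SACK gap-acked but that this SACK no longer covers
 * has been "revoked" by the peer (renege). Walk the sent queue between the
 * new cum-ack and biggest_tsn_acked: chunks still marked ACKED were not
 * re-acked here, so put them back to SENT and restore their flight size,
 * inflating the cwnd by book_size to compensate (it is deflated again when
 * the chunk is finally cum-acked). Chunks marked MARKED were re-acked by
 * this SACK and simply go back to ACKED. If anything was revoked, the ECN
 * nonce sum must also be resynchronized (see the tail of the function).
 */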
3162 static void
3163 sctp_check_for_revoked(struct sctp_tcb *stcb,
3164     struct sctp_association *asoc, uint32_t cumack,
3165     uint32_t biggest_tsn_acked)
3166 {
3167 	struct sctp_tmit_chunk *tp1;
3168 	int tot_revoked = 0;
3169 
3170 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3171 	while (tp1) {
3172 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
3173 		    MAX_TSN)) {
3174 			/*
3175 			 * ok this guy is either ACKED or MARKED. If it is
3176 			 * ACKED it has been previously acked but not this
3177 			 * time, i.e. revoked.  If it is MARKED it was ACK'ed
3178 			 * again.
3179 			 */
3180 			if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3181 			    MAX_TSN))
3182 				break;
3183 
3184 
3185 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3186 				/* it has been revoked */
3187 				tp1->sent = SCTP_DATAGRAM_SENT;
3188 				tp1->rec.data.chunk_was_revoked = 1;
3189 				/*
3190 				 * We must add this stuff back in to assure
3191 				 * timers and such get started.
3192 				 */
3193 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3194 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3195 					    tp1->whoTo->flight_size,
3196 					    tp1->book_size,
3197 					    (uintptr_t) tp1->whoTo,
3198 					    tp1->rec.data.TSN_seq);
3199 				}
3200 				sctp_flight_size_increase(tp1);
3201 				sctp_total_flight_increase(stcb, tp1);
3202 				/*
3203 				 * We inflate the cwnd to compensate for our
3204 				 * artificial inflation of the flight_size.
3205 				 */
3206 				tp1->whoTo->cwnd += tp1->book_size;
3207 				tot_revoked++;
3208 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3209 					sctp_log_sack(asoc->last_acked_seq,
3210 					    cumack,
3211 					    tp1->rec.data.TSN_seq,
3212 					    0,
3213 					    0,
3214 					    SCTP_LOG_TSN_REVOKED);
3215 				}
3216 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3217 				/* it has been re-acked in this SACK */
3218 				tp1->sent = SCTP_DATAGRAM_ACKED;
3219 			}
3220 		}
3221 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3222 			break;
3223 		tp1 = TAILQ_NEXT(tp1, sctp_next);
3224 	}
3225 	if (tot_revoked > 0) {
3226 		/*
3227 		 * Setup the ecn nonce re-sync point. We do this since once
3228 		 * data is revoked we begin to retransmit things, which do
3229 		 * NOT have the ECN bits set. This means we are now out of
3230 		 * sync and must wait until we get back in sync with the
3231 		 * peer to check ECN bits.
3232 		 */
3233 		tp1 = TAILQ_FIRST(&asoc->send_queue);
3234 		if (tp1 == NULL) {
3235 			asoc->nonce_resync_tsn = asoc->sending_seq;
3236 		} else {
3237 			asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
3238 		}
3239 		asoc->nonce_wait_for_ecne = 0;
3240 		asoc->nonce_sum_check = 0;
3241 	}
3242 }
3243 
3244 
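/*
 * Mark ("strike") chunks that this SACK did not cover although TSNs beyond
 * them were acked. Each strike bumps tp1->sent toward
 * SCTP_DATAGRAM_RESEND; once a chunk reaches RESEND it is scheduled for
 * fast retransmit: its flight size and the peer's rwnd are given back, an
 * alternate destination may be picked, and fast_retran_tsn is recorded so
 * later SACKs can tell whether a subsequent FR is allowed. The CMT SFR,
 * DAC and HTNA variations below change which chunks may be struck and by
 * how much.
 */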
3245 static void
3246 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3247     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3248 {
3249 	struct sctp_tmit_chunk *tp1;
3250 	int strike_flag = 0;
3251 	struct timeval now;
3252 	int tot_retrans = 0;
3253 	uint32_t sending_seq;
3254 	struct sctp_nets *net;
3255 	int num_dests_sacked = 0;
3256 
3257 	/*
3258 	 * select the sending_seq, this is either the next thing ready to be
3259 	 * sent but not transmitted, OR, the next seq we assign.
3260 	 */
3261 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3262 	if (tp1 == NULL) {
3263 		sending_seq = asoc->sending_seq;
3264 	} else {
3265 		sending_seq = tp1->rec.data.TSN_seq;
3266 	}
3267 
3268 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3269 	if ((asoc->sctp_cmt_on_off == 1) &&
3270 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3271 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3272 			if (net->saw_newack)
3273 				num_dests_sacked++;
3274 		}
3275 	}
3276 	if (stcb->asoc.peer_supports_prsctp) {
3277 		(void)SCTP_GETTIME_TIMEVAL(&now);
3278 	}
3279 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3280 	while (tp1) {
3281 		strike_flag = 0;
3282 		if (tp1->no_fr_allowed) {
3283 			/* this one had a timeout or something */
3284 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3285 			continue;
3286 		}
3287 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3288 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3289 				sctp_log_fr(biggest_tsn_newly_acked,
3290 				    tp1->rec.data.TSN_seq,
3291 				    tp1->sent,
3292 				    SCTP_FR_LOG_CHECK_STRIKE);
3293 		}
3294 		if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3295 		    MAX_TSN) ||
3296 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3297 			/* done */
3298 			break;
3299 		}
3300 		if (stcb->asoc.peer_supports_prsctp) {
3301 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3302 				/* Is it expired? */
3303 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3304 					/* Yes so drop it */
3305 					if (tp1->data != NULL) {
3306 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3307 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3308 						    SCTP_SO_NOT_LOCKED);
3309 					}
3310 					tp1 = TAILQ_NEXT(tp1, sctp_next);
3311 					continue;
3312 				}
3313 			}
3314 		}
3315 		if (compare_with_wrap(tp1->rec.data.TSN_seq,
3316 		    asoc->this_sack_highest_gap, MAX_TSN)) {
3317 			/* we are beyond the tsn in the sack  */
3318 			break;
3319 		}
3320 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3321 			/* either a RESEND, ACKED, or MARKED */
3322 			/* skip */
3323 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3324 				/* Continue striking FWD-TSN chunks */
3325 				tp1->rec.data.fwd_tsn_cnt++;
3326 			}
3327 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3328 			continue;
3329 		}
3330 		/*
3331 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3332 		 */
3333 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3334 			/*
3335 			 * No new acks were received for data sent to this
3336 			 * dest. Therefore, according to the SFR algo for
3337 			 * CMT, no data sent to this dest can be marked for
3338 			 * FR using this SACK.
3339 			 */
3340 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3341 			continue;
3342 		} else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq,
3343 		    tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
3344 			/*
3345 			 * CMT: New acks were received for data sent to
3346 			 * this dest. But no new acks were seen for data
3347 			 * sent after tp1. Therefore, according to the SFR
3348 			 * algo for CMT, tp1 cannot be marked for FR using
3349 			 * this SACK. This step covers part of the DAC algo
3350 			 * and the HTNA algo as well.
3351 			 */
3352 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3353 			continue;
3354 		}
3355 		/*
3356 		 * Here we check to see if we have already done a FR
3357 		 * and if so we see if the biggest TSN we saw in the sack is
3358 		 * smaller than the recovery point. If so we don't strike
3359 		 * the tsn... otherwise we CAN strike the TSN.
3360 		 */
3361 		/*
3362 		 * @@@ JRI: Check for CMT if (accum_moved &&
3363 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3364 		 * 0)) {
3365 		 */
3366 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3367 			/*
3368 			 * Strike the TSN if in fast-recovery and cum-ack
3369 			 * moved.
3370 			 */
3371 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3372 				sctp_log_fr(biggest_tsn_newly_acked,
3373 				    tp1->rec.data.TSN_seq,
3374 				    tp1->sent,
3375 				    SCTP_FR_LOG_STRIKE_CHUNK);
3376 			}
3377 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3378 				tp1->sent++;
3379 			}
3380 			if ((asoc->sctp_cmt_on_off == 1) &&
3381 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3382 				/*
3383 				 * CMT DAC algorithm: If SACK flag is set to
3384 				 * 0, then lowest_newack test will not pass
3385 				 * because it would have been set to the
3386 				 * cumack earlier. If it is not already marked
3387 				 * to be rtx'd, if this is not a mixed sack,
3388 				 * and if tp1 is not between two sacked TSNs,
3389 				 * then mark it by one more. NOTE that we are
3390 				 * marking by one additional time since the
3391 				 * SACK DAC flag indicates that two packets
3392 				 * have been received after this missing TSN.
3393 				 */
3394 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3395 				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3396 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3397 						sctp_log_fr(16 + num_dests_sacked,
3398 						    tp1->rec.data.TSN_seq,
3399 						    tp1->sent,
3400 						    SCTP_FR_LOG_STRIKE_CHUNK);
3401 					}
3402 					tp1->sent++;
3403 				}
3404 			}
3405 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3406 		    (asoc->sctp_cmt_on_off == 0)) {
3407 			/*
3408 			 * For those that have done a FR we must take
3409 			 * special consideration if we strike. I.e the
3410 			 * biggest_newly_acked must be higher than the
3411 			 * sending_seq at the time we did the FR.
3412 			 */
3413 			if (
3414 #ifdef SCTP_FR_TO_ALTERNATE
3415 			/*
3416 			 * If FR's go to new networks, then we must only do
3417 			 * this for singly homed asoc's. However if the FR's
3418 			 * go to the same network (Armando's work) then it's
3419 			 * ok to FR multiple times.
3420 			 */
3421 			    (asoc->numnets < 2)
3422 #else
3423 			    (1)
3424 #endif
3425 			    ) {
3426 
3427 				if ((compare_with_wrap(biggest_tsn_newly_acked,
3428 				    tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3429 				    (biggest_tsn_newly_acked ==
3430 				    tp1->rec.data.fast_retran_tsn)) {
3431 					/*
3432 					 * Strike the TSN, since this ack is
3433 					 * beyond where things were when we
3434 					 * did a FR.
3435 					 */
3436 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3437 						sctp_log_fr(biggest_tsn_newly_acked,
3438 						    tp1->rec.data.TSN_seq,
3439 						    tp1->sent,
3440 						    SCTP_FR_LOG_STRIKE_CHUNK);
3441 					}
3442 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3443 						tp1->sent++;
3444 					}
3445 					strike_flag = 1;
3446 					if ((asoc->sctp_cmt_on_off == 1) &&
3447 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3448 						/*
3449 						 * CMT DAC algorithm: If
3450 						 * SACK flag is set to 0,
3451 						 * then lowest_newack test
3452 						 * will not pass because it
3453 						 * would have been set to
3454 						 * the cumack earlier. If
3455 						 * the cumack earlier. If it is
3456 						 * not already marked to be rtx'd,
3457 						 * if this is not a mixed sack and
3458 						 * if tp1 is not between two
3459 						 * sacked TSNs, then mark it by
3460 						 * are marking by one
3461 						 * additional time since the
3462 						 * SACK DAC flag indicates
3463 						 * that two packets have
3464 						 * been received after this
3465 						 * missing TSN.
3466 						 */
3467 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3468 						    (num_dests_sacked == 1) &&
3469 						    compare_with_wrap(this_sack_lowest_newack,
3470 						    tp1->rec.data.TSN_seq, MAX_TSN)) {
3471 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3472 								sctp_log_fr(32 + num_dests_sacked,
3473 								    tp1->rec.data.TSN_seq,
3474 								    tp1->sent,
3475 								    SCTP_FR_LOG_STRIKE_CHUNK);
3476 							}
3477 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3478 								tp1->sent++;
3479 							}
3480 						}
3481 					}
3482 				}
3483 			}
3484 			/*
3485 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3486 			 * algo covers HTNA.
3487 			 */
3488 		} else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3489 		    biggest_tsn_newly_acked, MAX_TSN)) {
3490 			/*
3491 			 * We don't strike these: this is the HTNA
3492 			 * algorithm, i.e. we don't strike if our TSN is
3493 			 * larger than the Highest TSN Newly Acked.
3494 			 */
3495 			;
3496 		} else {
3497 			/* Strike the TSN */
3498 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3499 				sctp_log_fr(biggest_tsn_newly_acked,
3500 				    tp1->rec.data.TSN_seq,
3501 				    tp1->sent,
3502 				    SCTP_FR_LOG_STRIKE_CHUNK);
3503 			}
3504 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3505 				tp1->sent++;
3506 			}
3507 			if ((asoc->sctp_cmt_on_off == 1) &&
3508 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3509 				/*
3510 				 * CMT DAC algorithm: If SACK flag is set to
3511 				 * 0, then lowest_newack test will not pass
3512 				 * because it would have been set to the
3513 				 * cumack earlier. If it is not already marked to be
3514 				 * rtx'd, if this is not a mixed sack and if tp1 is
3515 				 * not between two sacked TSNs, then mark it by
3516 				 * one more. NOTE that we are marking by one
3517 				 * additional time since the SACK DAC flag
3518 				 * indicates that two packets have been
3519 				 * received after this missing TSN.
3520 				 */
3521 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3522 				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3523 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3524 						sctp_log_fr(48 + num_dests_sacked,
3525 						    tp1->rec.data.TSN_seq,
3526 						    tp1->sent,
3527 						    SCTP_FR_LOG_STRIKE_CHUNK);
3528 					}
3529 					tp1->sent++;
3530 				}
3531 			}
3532 		}
3533 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3534 			struct sctp_nets *alt;
3535 
3536 			/* fix counts and things */
3537 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3538 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3539 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3540 				    tp1->book_size,
3541 				    (uintptr_t) tp1->whoTo,
3542 				    tp1->rec.data.TSN_seq);
3543 			}
3544 			if (tp1->whoTo) {
3545 				tp1->whoTo->net_ack++;
3546 				sctp_flight_size_decrease(tp1);
3547 			}
3548 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3549 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3550 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3551 			}
3552 			/* add back to the rwnd */
3553 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3554 
3555 			/* remove from the total flight */
3556 			sctp_total_flight_decrease(stcb, tp1);
3557 
3558 			if ((stcb->asoc.peer_supports_prsctp) &&
3559 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3560 				/*
3561 				 * Has it been retransmitted tv_sec times? -
3562 				 * we store the retran count there.
3563 				 */
3564 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3565 					/* Yes, so drop it */
3566 					if (tp1->data != NULL) {
3567 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3568 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3569 						    SCTP_SO_NOT_LOCKED);
3570 					}
3571 					/* Make sure to flag we had a FR */
3572 					tp1->whoTo->net_ack++;
3573 					tp1 = TAILQ_NEXT(tp1, sctp_next);
3574 					continue;
3575 				}
3576 			}
3577 			/* printf("OK, we are now ready to FR this guy\n"); */
3578 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3579 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3580 				    0, SCTP_FR_MARKED);
3581 			}
3582 			if (strike_flag) {
3583 				/* This is a subsequent FR */
3584 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3585 			}
3586 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3587 			if (asoc->sctp_cmt_on_off == 1) {
3588 				/*
3589 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3590 				 * If CMT is being used, then pick dest with
3591 				 * largest ssthresh for any retransmission.
3592 				 */
3593 				tp1->no_fr_allowed = 1;
3594 				alt = tp1->whoTo;
3595 				/* sa_ignore NO_NULL_CHK */
3596 				if (asoc->sctp_cmt_pf > 0) {
3597 					/*
3598 					 * JRS 5/18/07 - If CMT PF is on,
3599 					 * use the PF version of
3600 					 * find_alt_net()
3601 					 */
3602 					alt = sctp_find_alternate_net(stcb, alt, 2);
3603 				} else {
3604 					/*
3605 					 * JRS 5/18/07 - If only CMT is on,
3606 					 * use the CMT version of
3607 					 * find_alt_net()
3608 					 */
3609 					/* sa_ignore NO_NULL_CHK */
3610 					alt = sctp_find_alternate_net(stcb, alt, 1);
3611 				}
3612 				if (alt == NULL) {
3613 					alt = tp1->whoTo;
3614 				}
3615 				/*
3616 				 * CUCv2: If a different dest is picked for
3617 				 * the retransmission, then new
3618 				 * (rtx-)pseudo_cumack needs to be tracked
3619 				 * for orig dest. Let CUCv2 track new (rtx-)
3620 				 * pseudo-cumack always.
3621 				 */
3622 				if (tp1->whoTo) {
3623 					tp1->whoTo->find_pseudo_cumack = 1;
3624 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3625 				}
3626 			} else {/* CMT is OFF */
3627 
3628 #ifdef SCTP_FR_TO_ALTERNATE
3629 				/* Can we find an alternate? */
3630 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3631 #else
3632 				/*
3633 				 * default behavior is to NOT retransmit
3634 				 * FR's to an alternate. Armando Caro's
3635 				 * paper details why.
3636 				 */
3637 				alt = tp1->whoTo;
3638 #endif
3639 			}
3640 
3641 			tp1->rec.data.doing_fast_retransmit = 1;
3642 			tot_retrans++;
3643 			/* mark the sending seq for possible subsequent FR's */
3644 			/*
3645 			 * printf("Marking TSN for FR new value %x\n",
3646 			 * (uint32_t)tp1->rec.data.TSN_seq);
3647 			 */
3648 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3649 				/*
3650 				 * If the send queue is empty then it's
3651 				 * the next sequence number that will be
3652 				 * assigned, so we subtract one from this to
3653 				 * get the one we last sent.
3654 				 */
3655 				tp1->rec.data.fast_retran_tsn = sending_seq;
3656 			} else {
3657 				/*
3658 				 * If there are chunks on the send queue
3659 				 * (unsent data that has made it from the
3660 				 * stream queues but not out the door), we
3661 				 * take the first one (which will have the
3662 				 * lowest TSN) and subtract one to get the
3663 				 * one we last sent.
3664 				 */
3665 				struct sctp_tmit_chunk *ttt;
3666 
3667 				ttt = TAILQ_FIRST(&asoc->send_queue);
3668 				tp1->rec.data.fast_retran_tsn =
3669 				    ttt->rec.data.TSN_seq;
3670 			}
3671 
3672 			if (tp1->do_rtt) {
3673 				/*
3674 				 * this guy had an RTO calculation pending on
3675 				 * it, cancel it
3676 				 */
3677 				tp1->do_rtt = 0;
3678 			}
3679 			if (alt != tp1->whoTo) {
3680 				/* yes, there is an alternate. */
3681 				sctp_free_remote_addr(tp1->whoTo);
3682 				/* sa_ignore FREED_MEMORY */
3683 				tp1->whoTo = alt;
3684 				atomic_add_int(&alt->ref_count, 1);
3685 			}
3686 		}
3687 		tp1 = TAILQ_NEXT(tp1, sctp_next);
3688 	}			/* while (tp1) */
3689 
3690 	if (tot_retrans > 0) {
3691 		/*
3692 		 * Setup the ecn nonce re-sync point. We do this since once
3693 		 * we go to FR something we introduce a Karn's rule scenario
3694 		 * and won't know the totals for the ECN bits.
3695 		 */
3696 		asoc->nonce_resync_tsn = sending_seq;
3697 		asoc->nonce_wait_for_ecne = 0;
3698 		asoc->nonce_sum_check = 0;
3699 	}
3700 }
3701 
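/*
 * PR-SCTP: try to move asoc->advanced_peer_ack_point forward over chunks
 * at the head of the sent queue that are marked SCTP_FORWARD_TSN_SKIP
 * (abandoned), stopping at the first reliable or still-pending chunk. The
 * returned chunk (if any) sits at the new advanced ack point and gives the
 * caller a handle for its FORWARD-TSN and timer decisions.
 */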
3702 struct sctp_tmit_chunk *
3703 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3704     struct sctp_association *asoc)
3705 {
3706 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3707 	struct timeval now;
3708 	int now_filled = 0;
3709 
3710 	if (asoc->peer_supports_prsctp == 0) {
3711 		return (NULL);
3712 	}
3713 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3714 	while (tp1) {
3715 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3716 		    tp1->sent != SCTP_DATAGRAM_RESEND) {
3717 			/* no chance to advance, out of here */
3718 			break;
3719 		}
3720 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3721 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3722 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3723 				    asoc->advanced_peer_ack_point,
3724 				    tp1->rec.data.TSN_seq, 0, 0);
3725 			}
3726 		}
3727 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3728 			/*
3729 			 * We can't fwd-tsn past any that are reliable aka
3730 			 * retransmitted until the asoc fails.
3731 			 */
3732 			break;
3733 		}
3734 		if (!now_filled) {
3735 			(void)SCTP_GETTIME_TIMEVAL(&now);
3736 			now_filled = 1;
3737 		}
3738 		tp2 = TAILQ_NEXT(tp1, sctp_next);
3739 		/*
3740 		 * now we have a chunk which is marked for another
3741 		 * retransmission to a PR-stream but has maybe run out of its
3742 		 * chances already OR has been marked to skip now. Can we skip
3743 		 * it if it's a resend?
3744 		 */
3745 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3746 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3747 			/*
3748 			 * Now is this one marked for resend and its time is
3749 			 * now up?
3750 			 */
3751 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3752 				/* Yes so drop it */
3753 				if (tp1->data) {
3754 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3755 					    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3756 					    SCTP_SO_NOT_LOCKED);
3757 				}
3758 			} else {
3759 				/*
3760 				 * No, we are done when we hit one marked for
3761 				 * resend whose time has not expired.
3762 				 */
3763 				break;
3764 			}
3765 		}
3766 		/*
3767 		 * Ok now if this chunk is marked to be dropped we can clean
3768 		 * up the chunk, advance our peer ack point, and check
3769 		 * the next chunk.
3770 		 */
3771 		if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3772 			/* advance PeerAckPoint goes forward */
3773 			if (compare_with_wrap(tp1->rec.data.TSN_seq,
3774 			    asoc->advanced_peer_ack_point,
3775 			    MAX_TSN)) {
3776 
3777 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3778 				a_adv = tp1;
3779 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3780 				/* No update but we do save the chk */
3781 				a_adv = tp1;
3782 			}
3783 		} else {
3784 			/*
3785 			 * If it is still in RESEND we can advance no
3786 			 * further
3787 			 */
3788 			break;
3789 		}
3790 		/*
3791 		 * If we hit here we just dumped tp1, move to next tsn on
3792 		 * sent queue.
3793 		 */
3794 		tp1 = tp2;
3795 	}
3796 	return (a_adv);
3797 }
3798 
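/*
 * Sanity-check the association's flight-size accounting against the sent
 * queue. The caller has just found no flight outstanding, so every chunk
 * on the sent queue should be marked for resend or already acked; anything
 * still counted as in flight (or in between) means the accounting drifted.
 * Panic under INVARIANTS, otherwise report it and return non-zero so the
 * caller can rebuild the counters.
 */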
3799 static int
3800 sctp_fs_audit(struct sctp_association *asoc)
3801 {
3802 	struct sctp_tmit_chunk *chk;
3803 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3804 	int entry_flight, entry_cnt, ret;
3805 
3806 	entry_flight = asoc->total_flight;
3807 	entry_cnt = asoc->total_flight_count;
3808 	ret = 0;
3809 
3810 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3811 		return (0);
3812 
3813 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3814 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3815 			printf("Chk TSN:%u size:%d inflight cnt:%d\n",
3816 			    chk->rec.data.TSN_seq,
3817 			    chk->send_size,
3818 			    chk->snd_count
3819 			    );
3820 			inflight++;
3821 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3822 			resend++;
3823 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3824 			inbetween++;
3825 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3826 			above++;
3827 		} else {
3828 			acked++;
3829 		}
3830 	}
3831 
3832 	if ((inflight > 0) || (inbetween > 0)) {
3833 #ifdef INVARIANTS
3834 		panic("Flight size-express incorrect? \n");
3835 #else
3836 		printf("asoc->total_flight:%d cnt:%d\n",
3837 		    entry_flight, entry_cnt);
3838 
3839 		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3840 		    inflight, inbetween, resend, above, acked);
3841 		ret = 1;
3842 #endif
3843 	}
3844 	return (ret);
3845 }
3846 
3847 
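/*
 * A window-probe chunk was sent while the peer advertised a zero rwnd.
 * Now that the window has re-opened, move the probe chunk back to the
 * resend state (shrinking flight size accordingly) so it goes out as
 * normal data; chunks already acked or without data are left alone.
 */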
3848 static void
3849 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3850     struct sctp_association *asoc,
3851     struct sctp_nets *net,
3852     struct sctp_tmit_chunk *tp1)
3853 {
3854 	tp1->window_probe = 0;
3855 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3856 		/* TSNs skipped; we do NOT move back. */
3857 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3858 		    tp1->whoTo->flight_size,
3859 		    tp1->book_size,
3860 		    (uintptr_t) tp1->whoTo,
3861 		    tp1->rec.data.TSN_seq);
3862 		return;
3863 	}
3864 	/* First setup this by shrinking flight */
3865 	sctp_flight_size_decrease(tp1);
3866 	sctp_total_flight_decrease(stcb, tp1);
3867 	/* Now mark for resend */
3868 	tp1->sent = SCTP_DATAGRAM_RESEND;
3869 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3870 
3871 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3872 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3873 		    tp1->whoTo->flight_size,
3874 		    tp1->book_size,
3875 		    (uintptr_t) tp1->whoTo,
3876 		    tp1->rec.data.TSN_seq);
3877 	}
3878 }
3879 
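/*
 * Express (fast-path) handling for a SACK that carries no gap-ack blocks:
 * only the cumulative TSN and the rwnd need processing. Everything up to
 * cumack is freed from the sent queue, cwnd/RTO bookkeeping is updated,
 * timers are restarted where data remains in flight, and the same shutdown
 * and PR-SCTP checks are run as in the slow path below.
 */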
3880 void
3881 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3882     uint32_t rwnd, int nonce_sum_flag, int *abort_now)
3883 {
3884 	struct sctp_nets *net;
3885 	struct sctp_association *asoc;
3886 	struct sctp_tmit_chunk *tp1, *tp2;
3887 	uint32_t old_rwnd;
3888 	int win_probe_recovery = 0;
3889 	int win_probe_recovered = 0;
3890 	int j, done_once = 0;
3891 
3892 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3893 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3894 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3895 	}
3896 	SCTP_TCB_LOCK_ASSERT(stcb);
3897 #ifdef SCTP_ASOCLOG_OF_TSNS
3898 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3899 	stcb->asoc.cumack_log_at++;
3900 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3901 		stcb->asoc.cumack_log_at = 0;
3902 	}
3903 #endif
3904 	asoc = &stcb->asoc;
3905 	old_rwnd = asoc->peers_rwnd;
3906 	if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
3907 		/* old ack */
3908 		return;
3909 	} else if (asoc->last_acked_seq == cumack) {
3910 		/* Window update sack */
3911 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3912 		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3913 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3914 			/* SWS sender side engages */
3915 			asoc->peers_rwnd = 0;
3916 		}
3917 		if (asoc->peers_rwnd > old_rwnd) {
3918 			goto again;
3919 		}
3920 		return;
3921 	}
3922 	/* First setup for CC stuff */
3923 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3924 		net->prev_cwnd = net->cwnd;
3925 		net->net_ack = 0;
3926 		net->net_ack2 = 0;
3927 
3928 		/*
3929 		 * CMT: Reset CUC and Fast recovery algo variables before
3930 		 * SACK processing
3931 		 */
3932 		net->new_pseudo_cumack = 0;
3933 		net->will_exit_fast_recovery = 0;
3934 	}
3935 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3936 		uint32_t send_s;
3937 
3938 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3939 			tp1 = TAILQ_LAST(&asoc->sent_queue,
3940 			    sctpchunk_listhead);
3941 			send_s = tp1->rec.data.TSN_seq + 1;
3942 		} else {
3943 			send_s = asoc->sending_seq;
3944 		}
3945 		if ((cumack == send_s) ||
3946 		    compare_with_wrap(cumack, send_s, MAX_TSN)) {
3947 #ifndef INVARIANTS
3948 			struct mbuf *oper;
3949 
3950 #endif
3951 #ifdef INVARIANTS
3952 			panic("Impossible sack 1");
3953 #else
3954 
3955 			*abort_now = 1;
3956 			/* XXX */
3957 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3958 			    0, M_DONTWAIT, 1, MT_DATA);
3959 			if (oper) {
3960 				struct sctp_paramhdr *ph;
3961 				uint32_t *ippp;
3962 
3963 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3964 				    sizeof(uint32_t);
3965 				ph = mtod(oper, struct sctp_paramhdr *);
3966 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3967 				ph->param_length = htons(SCTP_BUF_LEN(oper));
3968 				ippp = (uint32_t *) (ph + 1);
3969 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3970 			}
3971 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3972 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
3973 			return;
3974 #endif
3975 		}
3976 	}
3977 	asoc->this_sack_highest_gap = cumack;
3978 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3979 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3980 		    stcb->asoc.overall_error_count,
3981 		    0,
3982 		    SCTP_FROM_SCTP_INDATA,
3983 		    __LINE__);
3984 	}
3985 	stcb->asoc.overall_error_count = 0;
3986 	if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
3987 		/* process the new consecutive TSN first */
3988 		tp1 = TAILQ_FIRST(&asoc->sent_queue);
3989 		while (tp1) {
3990 			tp2 = TAILQ_NEXT(tp1, sctp_next);
3991 			if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
3992 			    MAX_TSN) ||
3993 			    cumack == tp1->rec.data.TSN_seq) {
3994 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3995 					printf("Warning, an unsent is now acked?\n");
3996 				}
3997 				/*
3998 				 * ECN Nonce: Add the nonce to the sender's
3999 				 * nonce sum
4000 				 */
4001 				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4002 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4003 					/*
4004 					 * If it is less than ACKED, it is
4005 					 * now no longer in flight. Higher
4006 					 * values may occur during marking.
4007 					 */
4008 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4009 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4010 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4011 							    tp1->whoTo->flight_size,
4012 							    tp1->book_size,
4013 							    (uintptr_t) tp1->whoTo,
4014 							    tp1->rec.data.TSN_seq);
4015 						}
4016 						sctp_flight_size_decrease(tp1);
4017 						/* sa_ignore NO_NULL_CHK */
4018 						sctp_total_flight_decrease(stcb, tp1);
4019 					}
4020 					tp1->whoTo->net_ack += tp1->send_size;
4021 					if (tp1->snd_count < 2) {
4022 						/*
4023 						 * True non-retransmitted
4024 						 * chunk
4025 						 */
4026 						tp1->whoTo->net_ack2 +=
4027 						    tp1->send_size;
4028 
4029 						/* update RTO too? */
4030 						if (tp1->do_rtt) {
4031 							tp1->whoTo->RTO =
4032 							/*
4033 							 * sa_ignore
4034 							 * NO_NULL_CHK
4035 							 */
4036 							    sctp_calculate_rto(stcb,
4037 							    asoc, tp1->whoTo,
4038 							    &tp1->sent_rcv_time,
4039 							    sctp_align_safe_nocopy);
4040 							tp1->do_rtt = 0;
4041 						}
4042 					}
4043 					/*
4044 					 * CMT: CUCv2 algorithm. From the
4045 					 * cumack'd TSNs, for each TSN being
4046 					 * acked for the first time, set the
4047 					 * following variables for the
4048 					 * corresp destination.
4049 					 * new_pseudo_cumack will trigger a
4050 					 * cwnd update.
4051 					 * find_(rtx_)pseudo_cumack will
4052 					 * trigger search for the next
4053 					 * expected (rtx-)pseudo-cumack.
4054 					 */
4055 					tp1->whoTo->new_pseudo_cumack = 1;
4056 					tp1->whoTo->find_pseudo_cumack = 1;
4057 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4058 
4059 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4060 						/* sa_ignore NO_NULL_CHK */
4061 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4062 					}
4063 				}
4064 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4065 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4066 				}
4067 				if (tp1->rec.data.chunk_was_revoked) {
4068 					/* deflate the cwnd */
4069 					tp1->whoTo->cwnd -= tp1->book_size;
4070 					tp1->rec.data.chunk_was_revoked = 0;
4071 				}
4072 				tp1->sent = SCTP_DATAGRAM_ACKED;
4073 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4074 				if (tp1->data) {
4075 					/* sa_ignore NO_NULL_CHK */
4076 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4077 					sctp_m_freem(tp1->data);
4078 				}
4079 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4080 					sctp_log_sack(asoc->last_acked_seq,
4081 					    cumack,
4082 					    tp1->rec.data.TSN_seq,
4083 					    0,
4084 					    0,
4085 					    SCTP_LOG_FREE_SENT);
4086 				}
4087 				tp1->data = NULL;
4088 				asoc->sent_queue_cnt--;
4089 				sctp_free_a_chunk(stcb, tp1);
4090 				tp1 = tp2;
4091 			} else {
4092 				break;
4093 			}
4094 		}
4095 
4096 	}
4097 	/* sa_ignore NO_NULL_CHK */
4098 	if (stcb->sctp_socket) {
4099 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4100 		struct socket *so;
4101 
4102 #endif
4103 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4104 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4105 			/* sa_ignore NO_NULL_CHK */
4106 			sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
4107 		}
4108 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4109 		so = SCTP_INP_SO(stcb->sctp_ep);
4110 		atomic_add_int(&stcb->asoc.refcnt, 1);
4111 		SCTP_TCB_UNLOCK(stcb);
4112 		SCTP_SOCKET_LOCK(so, 1);
4113 		SCTP_TCB_LOCK(stcb);
4114 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4115 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4116 			/* assoc was freed while we were unlocked */
4117 			SCTP_SOCKET_UNLOCK(so, 1);
4118 			return;
4119 		}
4120 #endif
4121 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4122 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4123 		SCTP_SOCKET_UNLOCK(so, 1);
4124 #endif
4125 	} else {
4126 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4127 			sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
4128 		}
4129 	}
4130 
4131 	/* JRS - Use the congestion control given in the CC module */
4132 	if (asoc->last_acked_seq != cumack)
4133 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4134 
4135 	asoc->last_acked_seq = cumack;
4136 
4137 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4138 		/* nothing left in-flight */
4139 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4140 			net->flight_size = 0;
4141 			net->partial_bytes_acked = 0;
4142 		}
4143 		asoc->total_flight = 0;
4144 		asoc->total_flight_count = 0;
4145 	}
4146 	/* ECN Nonce updates */
4147 	if (asoc->ecn_nonce_allowed) {
4148 		if (asoc->nonce_sum_check) {
4149 			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
4150 				if (asoc->nonce_wait_for_ecne == 0) {
4151 					struct sctp_tmit_chunk *lchk;
4152 
4153 					lchk = TAILQ_FIRST(&asoc->send_queue);
4154 					asoc->nonce_wait_for_ecne = 1;
4155 					if (lchk) {
4156 						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4157 					} else {
4158 						asoc->nonce_wait_tsn = asoc->sending_seq;
4159 					}
4160 				} else {
4161 					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4162 					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4163 						/*
4164 						 * Misbehaving peer. We need
4165 						 * to react to this guy
4166 						 */
4167 						asoc->ecn_allowed = 0;
4168 						asoc->ecn_nonce_allowed = 0;
4169 					}
4170 				}
4171 			}
4172 		} else {
4173 			/* See if Resynchronization Possible */
4174 			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4175 				asoc->nonce_sum_check = 1;
4176 				/*
4177 				 * Now we must calculate what the base is.
4178 				 * We do this based on two things: we know
4179 				 * the totals for all the segments
4180 				 * gap-acked in the SACK (none). We also
4181 				 * know the SACK's nonce sum, it's in
4182 				 * nonce_sum_flag. So we can build a truth
4183 				 * table to back-calculate the new value of
4184 				 * asoc->nonce_sum_expect_base
4185 				 * (SACK-flag, seg-sums -> base):
4186 				 *
4187 				 *   0, 0 -> 0    1, 0 -> 1
4188 				 *   0, 1 -> 1    1, 1 -> 0
4189 				 */
4190 				asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
4191 			}
4192 		}
4193 	}
4194 	/* RWND update */
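	/*
	 * peers_rwnd is the advertised rwnd minus what is still in flight,
	 * where every in-flight chunk is additionally charged
	 * sctp_peer_chunk_oh bytes of assumed bookkeeping overhead. For
	 * example (made-up numbers): rwnd 64000 with 3000 bytes outstanding
	 * in 3 chunks and an overhead of 100 gives
	 * 64000 - (3000 + 3 * 100) = 60700. sctp_sbspace_sub() keeps the
	 * subtraction from wrapping below zero, and the SWS check below
	 * then zeroes any window smaller than sctp_sws_sender.
	 */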
4195 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4196 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4197 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4198 		/* SWS sender side engages */
4199 		asoc->peers_rwnd = 0;
4200 	}
4201 	if (asoc->peers_rwnd > old_rwnd) {
4202 		win_probe_recovery = 1;
4203 	}
4204 	/* Now assure a timer is running wherever data is queued */
4205 again:
4206 	j = 0;
4207 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4208 		int to_ticks;
4209 
4210 		if (win_probe_recovery && (net->window_probe)) {
4211 			win_probe_recovered = 1;
4212 			/*
4213 			 * Find the first chunk that was used with the window
4214 			 * probe and clear the sent flag
4215 			 */
4216 			/* sa_ignore FREED_MEMORY */
4217 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4218 				if (tp1->window_probe) {
4219 					/* move back to data send queue */
4220 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
4221 					break;
4222 				}
4223 			}
4224 		}
4225 		if (net->RTO == 0) {
4226 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4227 		} else {
4228 			to_ticks = MSEC_TO_TICKS(net->RTO);
4229 		}
4230 		if (net->flight_size) {
4231 			j++;
4232 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4233 			    sctp_timeout_handler, &net->rxt_timer);
4234 			if (net->window_probe) {
4235 				net->window_probe = 0;
4236 			}
4237 		} else {
4238 			if (net->window_probe) {
4239 				/*
4240 				 * In window probes we must assure a timer
4241 				 * is still running there
4242 				 */
4243 				net->window_probe = 0;
4244 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4245 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4246 					    sctp_timeout_handler, &net->rxt_timer);
4247 				}
4248 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4249 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4250 				    stcb, net,
4251 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4252 			}
4253 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4254 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4255 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4256 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4257 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4258 				}
4259 			}
4260 		}
4261 	}
4262 	if ((j == 0) &&
4263 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4264 	    (asoc->sent_queue_retran_cnt == 0) &&
4265 	    (win_probe_recovered == 0) &&
4266 	    (done_once == 0)) {
4267 		/*
4268 		 * huh, this should not happen unless all packets are
4269 		 * PR-SCTP and marked to skip of course.
4270 		 */
4271 		if (sctp_fs_audit(asoc)) {
4272 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4273 				net->flight_size = 0;
4274 			}
4275 			asoc->total_flight = 0;
4276 			asoc->total_flight_count = 0;
4277 			asoc->sent_queue_retran_cnt = 0;
4278 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4279 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4280 					sctp_flight_size_increase(tp1);
4281 					sctp_total_flight_increase(stcb, tp1);
4282 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4283 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4284 				}
4285 			}
4286 		}
4287 		done_once = 1;
4288 		goto again;
4289 	}
4290 	/**********************************/
4291 	/* Now what about shutdown issues */
4292 	/**********************************/
4293 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4294 		/* nothing left on sendqueue.. consider done */
4295 		/* clean up */
4296 		if ((asoc->stream_queue_cnt == 1) &&
4297 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4298 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4299 		    (asoc->locked_on_sending)
4300 		    ) {
4301 			struct sctp_stream_queue_pending *sp;
4302 
4303 			/*
4304 			 * I may be in a state where we got it all across... but
4305 			 * cannot write more due to a shutdown... we abort
4306 			 * since the user did not indicate EOR in this case.
4307 			 * The sp will be cleaned up during the free of the asoc.
4308 			 */
4309 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4310 			    sctp_streamhead);
4311 			if ((sp) && (sp->length == 0)) {
4312 				/* Let cleanup code purge it */
4313 				if (sp->msg_is_complete) {
4314 					asoc->stream_queue_cnt--;
4315 				} else {
4316 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4317 					asoc->locked_on_sending = NULL;
4318 					asoc->stream_queue_cnt--;
4319 				}
4320 			}
4321 		}
4322 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4323 		    (asoc->stream_queue_cnt == 0)) {
4324 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4325 				/* Need to abort here */
4326 				struct mbuf *oper;
4327 
4328 		abort_out_now:
4329 				*abort_now = 1;
4330 				/* XXX */
4331 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4332 				    0, M_DONTWAIT, 1, MT_DATA);
4333 				if (oper) {
4334 					struct sctp_paramhdr *ph;
4335 					uint32_t *ippp;
4336 
4337 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4338 					    sizeof(uint32_t);
4339 					ph = mtod(oper, struct sctp_paramhdr *);
4340 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4341 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4342 					ippp = (uint32_t *) (ph + 1);
4343 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4344 				}
4345 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4346 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4347 			} else {
4348 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4349 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4350 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4351 				}
4352 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4353 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4354 				sctp_stop_timers_for_shutdown(stcb);
4355 				sctp_send_shutdown(stcb,
4356 				    stcb->asoc.primary_destination);
4357 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4358 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4359 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4360 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4361 			}
4362 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4363 		    (asoc->stream_queue_cnt == 0)) {
4364 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4365 				goto abort_out_now;
4366 			}
4367 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4368 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4369 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4370 			sctp_send_shutdown_ack(stcb,
4371 			    stcb->asoc.primary_destination);
4372 			sctp_stop_timers_for_shutdown(stcb);
4373 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4374 			    stcb->sctp_ep, stcb, asoc->primary_destination);
4375 		}
4376 	}
4377 	/*********************************************/
4378 	/* Here we perform PR-SCTP procedures        */
4379 	/* (section 4.2)                             */
4380 	/*********************************************/
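	/*
	 * advancedPeerAckPoint is the highest TSN the peer can be told to
	 * skip via a FORWARD-TSN chunk. Step C1 pulls it up to the new
	 * cum-ack; step C3 only emits a FORWARD-TSN when the ack point has
	 * moved beyond the cum-ack, since otherwise the chunk would tell
	 * the peer nothing it does not already know.
	 */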
4381 	/* C1. update advancedPeerAckPoint */
4382 	if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4383 		asoc->advanced_peer_ack_point = cumack;
4384 	}
4385 	/* PR-SCTP issues need to be addressed too */
4386 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4387 		struct sctp_tmit_chunk *lchk;
4388 		uint32_t old_adv_peer_ack_point;
4389 
4390 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4391 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4392 		/* C3. See if we need to send a Fwd-TSN */
4393 		if (compare_with_wrap(asoc->advanced_peer_ack_point, cumack,
4394 		    MAX_TSN)) {
4395 			/*
4396 			 * ISSUE with ECN, see FWD-TSN processing for notes
4397 			 * on issues that will occur when the ECN NONCE
4398 			 * stuff is put into SCTP for cross checking.
4399 			 */
4400 			if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
4401 			    MAX_TSN)) {
4402 				send_forward_tsn(stcb, asoc);
4403 				/*
4404 				 * ECN Nonce: Disable Nonce Sum check when
4405 				 * FWD TSN is sent and store resync tsn
4406 				 */
4407 				asoc->nonce_sum_check = 0;
4408 				asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
4409 			} else if (lchk) {
4410 				/* try to FR fwd-tsn's that get lost too */
4411 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4412 					send_forward_tsn(stcb, asoc);
4413 				}
4414 			}
4415 		}
4416 		if (lchk) {
4417 			/* Assure a timer is up */
4418 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4419 			    stcb->sctp_ep, stcb, lchk->whoTo);
4420 		}
4421 	}
4422 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4423 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4424 		    rwnd,
4425 		    stcb->asoc.peers_rwnd,
4426 		    stcb->asoc.total_flight,
4427 		    stcb->asoc.total_output_queue_size);
4428 	}
4429 }
4430 
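/*
 * Full (slow-path) SACK / NR-SACK handling. offset_seg and offset_dup are
 * the offsets in mbuf m of the gap-ack blocks and the duplicate-TSN list;
 * num_seg/num_nr_seg/num_dup give their counts, and cum_ack/rwnd come from
 * the chunk header. The processing order is spelled out step by step in
 * the comment at the top of the function body.
 */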
4431 void
4432 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4433     struct sctp_tcb *stcb, struct sctp_nets *net_from,
4434     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4435     int *abort_now, uint8_t flags,
4436     uint32_t cum_ack, uint32_t rwnd)
4437 {
4438 	struct sctp_association *asoc;
4439 	struct sctp_tmit_chunk *tp1, *tp2;
4440 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4441 	uint32_t sav_cum_ack;
4442 	uint16_t wake_him = 0;
4443 	uint32_t send_s = 0;
4444 	long j;
4445 	int accum_moved = 0;
4446 	int will_exit_fast_recovery = 0;
4447 	uint32_t a_rwnd, old_rwnd;
4448 	int win_probe_recovery = 0;
4449 	int win_probe_recovered = 0;
4450 	struct sctp_nets *net = NULL;
4451 	int nonce_sum_flag, ecn_seg_sums = 0;
4452 	int done_once;
4453 	uint8_t reneged_all = 0;
4454 	uint8_t cmt_dac_flag;
4455 
4456 	/*
4457 	 * we take any chance we can to service our queues since we cannot
4458 	 * get awoken when the socket is read from :<
4459 	 */
4460 	/*
4461 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4462 	 * old sack, if so discard. 2) If there is nothing left in the send
4463 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4464 	 * too, update any rwnd change and verify no timers are running.
4465 	 * then return. 3) Process any new consecutive data i.e. cum-ack
4466 	 * moved process these first and note that it moved. 4) Process any
4467 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4468 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4469 	 * sync up flightsizes and things, stop all timers and also check
4470 	 * for shutdown_pending state. If so then go ahead and send off the
4471 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4472 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4473 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4474 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4475 	 * if in shutdown_recv state.
4476 	 */
4477 	SCTP_TCB_LOCK_ASSERT(stcb);
4478 	/* CMT DAC algo */
4479 	this_sack_lowest_newack = 0;
4480 	j = 0;
4481 	SCTP_STAT_INCR(sctps_slowpath_sack);
4482 	last_tsn = cum_ack;
4483 	nonce_sum_flag = flags & SCTP_SACK_NONCE_SUM;
4484 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4485 #ifdef SCTP_ASOCLOG_OF_TSNS
4486 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4487 	stcb->asoc.cumack_log_at++;
4488 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4489 		stcb->asoc.cumack_log_at = 0;
4490 	}
4491 #endif
4492 	a_rwnd = rwnd;
4493 
4494 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4495 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4496 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4497 	}
4498 	old_rwnd = stcb->asoc.peers_rwnd;
4499 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4500 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4501 		    stcb->asoc.overall_error_count,
4502 		    0,
4503 		    SCTP_FROM_SCTP_INDATA,
4504 		    __LINE__);
4505 	}
4506 	stcb->asoc.overall_error_count = 0;
4507 	asoc = &stcb->asoc;
4508 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4509 		sctp_log_sack(asoc->last_acked_seq,
4510 		    cum_ack,
4511 		    0,
4512 		    num_seg,
4513 		    num_dup,
4514 		    SCTP_LOG_NEW_SACK);
4515 	}
4516 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
4517 		uint16_t i;
4518 		uint32_t *dupdata, dblock;
4519 
4520 		for (i = 0; i < num_dup; i++) {
4521 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4522 			    sizeof(uint32_t), (uint8_t *) & dblock);
4523 			if (dupdata == NULL) {
4524 				break;
4525 			}
4526 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4527 		}
4528 	}
4529 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4530 		/* reality check */
4531 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4532 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4533 			    sctpchunk_listhead);
4534 			send_s = tp1->rec.data.TSN_seq + 1;
4535 		} else {
4536 			tp1 = NULL;
4537 			send_s = asoc->sending_seq;
4538 		}
4539 		if (cum_ack == send_s ||
4540 		    compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
4541 			struct mbuf *oper;
4542 
4543 			/*
4544 			 * no way, we have not even sent this TSN out yet.
4545 			 * Peer is hopelessly messed up with us.
4546 			 */
4547 			printf("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4548 			    cum_ack, send_s);
4549 			if (tp1) {
4550 				printf("Got send_s from tsn:%x + 1 of tp1:%p\n",
4551 				    tp1->rec.data.TSN_seq, tp1);
4552 			}
4553 	hopeless_peer:
4554 			*abort_now = 1;
4555 			/* XXX */
4556 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4557 			    0, M_DONTWAIT, 1, MT_DATA);
4558 			if (oper) {
4559 				struct sctp_paramhdr *ph;
4560 				uint32_t *ippp;
4561 
4562 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4563 				    sizeof(uint32_t);
4564 				ph = mtod(oper, struct sctp_paramhdr *);
4565 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4566 				ph->param_length = htons(SCTP_BUF_LEN(oper));
4567 				ippp = (uint32_t *) (ph + 1);
4568 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4569 			}
4570 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4571 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4572 			return;
4573 		}
4574 	}
4575 	/**********************/
4576 	/* 1) check the range */
4577 	/**********************/
4578 	if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
4579 		/* acking something behind */
4580 		return;
4581 	}
4582 	sav_cum_ack = asoc->last_acked_seq;
4583 
4584 	/* update the Rwnd of the peer */
4585 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4586 	    TAILQ_EMPTY(&asoc->send_queue) &&
4587 	    (asoc->stream_queue_cnt == 0)) {
4588 		/* nothing left on send/sent and strmq */
4589 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4590 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4591 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4592 		}
4593 		asoc->peers_rwnd = a_rwnd;
4594 		if (asoc->sent_queue_retran_cnt) {
4595 			asoc->sent_queue_retran_cnt = 0;
4596 		}
4597 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4598 			/* SWS sender side engages */
4599 			asoc->peers_rwnd = 0;
4600 		}
4601 		/* stop any timers */
4602 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4603 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4604 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4605 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4606 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4607 					SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
4608 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4609 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4610 				}
4611 			}
4612 			net->partial_bytes_acked = 0;
4613 			net->flight_size = 0;
4614 		}
4615 		asoc->total_flight = 0;
4616 		asoc->total_flight_count = 0;
4617 		return;
4618 	}
4619 	/*
4620 	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4621 	 * things. The total byte count acked is tracked in netAckSz AND
4622 	 * netAck2 is used to track the total bytes acked that are
4623 	 * unambiguous and were never retransmitted. We track these on a per
4624 	 * destination address basis.
4625 	 */
4626 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4627 		net->prev_cwnd = net->cwnd;
4628 		net->net_ack = 0;
4629 		net->net_ack2 = 0;
4630 
4631 		/*
4632 		 * CMT: Reset CUC and Fast recovery algo variables before
4633 		 * SACK processing
4634 		 */
4635 		net->new_pseudo_cumack = 0;
4636 		net->will_exit_fast_recovery = 0;
4637 	}
4638 	/* process the new consecutive TSN first */
4639 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
4640 	while (tp1) {
4641 		if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
4642 		    MAX_TSN) ||
4643 		    last_tsn == tp1->rec.data.TSN_seq) {
4644 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4645 				/*
4646 				 * ECN Nonce: Add the nonce to the sender's
4647 				 * nonce sum
4648 				 */
4649 				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4650 				accum_moved = 1;
4651 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4652 					/*
4653 					 * If it is less than ACKED, it is
4654 					 * now no longer in flight. Higher
4655 					 * values may occur during marking.
4656 					 */
4657 					if ((tp1->whoTo->dest_state &
4658 					    SCTP_ADDR_UNCONFIRMED) &&
4659 					    (tp1->snd_count < 2)) {
4660 						/*
4661 						 * If there was no retran
4662 						 * and the address is
4663 						 * un-confirmed and we sent
4664 						 * there and it is now
4665 						 * sacked... it's confirmed,
4666 						 * so mark it.
4667 						 */
4668 						tp1->whoTo->dest_state &=
4669 						    ~SCTP_ADDR_UNCONFIRMED;
4670 					}
4671 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4672 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4673 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4674 							    tp1->whoTo->flight_size,
4675 							    tp1->book_size,
4676 							    (uintptr_t) tp1->whoTo,
4677 							    tp1->rec.data.TSN_seq);
4678 						}
4679 						sctp_flight_size_decrease(tp1);
4680 						sctp_total_flight_decrease(stcb, tp1);
4681 					}
4682 					tp1->whoTo->net_ack += tp1->send_size;
4683 
4684 					/* CMT SFR and DAC algos */
4685 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4686 					tp1->whoTo->saw_newack = 1;
4687 
4688 					if (tp1->snd_count < 2) {
4689 						/*
4690 						 * True non-retransmitted
4691 						 * chunk
4692 						 */
4693 						tp1->whoTo->net_ack2 +=
4694 						    tp1->send_size;
4695 
4696 						/* update RTO too? */
4697 						if (tp1->do_rtt) {
4698 							tp1->whoTo->RTO =
4699 							    sctp_calculate_rto(stcb,
4700 							    asoc, tp1->whoTo,
4701 							    &tp1->sent_rcv_time,
4702 							    sctp_align_safe_nocopy);
4703 							tp1->do_rtt = 0;
4704 						}
4705 					}
4706 					/*
4707 					 * CMT: CUCv2 algorithm. From the
4708 					 * cumack'd TSNs, for each TSN being
4709 					 * acked for the first time, set the
4710 					 * following variables for the
4711 					 * corresp destination.
4712 					 * new_pseudo_cumack will trigger a
4713 					 * cwnd update.
4714 					 * find_(rtx_)pseudo_cumack will
4715 					 * trigger search for the next
4716 					 * expected (rtx-)pseudo-cumack.
4717 					 */
4718 					tp1->whoTo->new_pseudo_cumack = 1;
4719 					tp1->whoTo->find_pseudo_cumack = 1;
4720 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4721 
4722 
4723 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4724 						sctp_log_sack(asoc->last_acked_seq,
4725 						    cum_ack,
4726 						    tp1->rec.data.TSN_seq,
4727 						    0,
4728 						    0,
4729 						    SCTP_LOG_TSN_ACKED);
4730 					}
4731 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4732 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4733 					}
4734 				}
4735 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4736 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4737 #ifdef SCTP_AUDITING_ENABLED
4738 					sctp_audit_log(0xB3,
4739 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4740 #endif
4741 				}
4742 				if (tp1->rec.data.chunk_was_revoked) {
4743 					/* deflate the cwnd */
4744 					tp1->whoTo->cwnd -= tp1->book_size;
4745 					tp1->rec.data.chunk_was_revoked = 0;
4746 				}
4747 				tp1->sent = SCTP_DATAGRAM_ACKED;
4748 			}
4749 		} else {
4750 			break;
4751 		}
4752 		tp1 = TAILQ_NEXT(tp1, sctp_next);
4753 	}
4754 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4755 	/* always set this up to cum-ack */
4756 	asoc->this_sack_highest_gap = last_tsn;
4757 
4758 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4759 
4760 		/*
4761 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4762 		 * to be greater than the cumack. Also reset saw_newack to 0
4763 		 * for all dests.
4764 		 */
4765 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4766 			net->saw_newack = 0;
4767 			net->this_sack_highest_newack = last_tsn;
4768 		}
4769 
4770 		/*
4771 		 * this_sack_highest_gap will increase while handling NEW
4772 		 * segments. this_sack_highest_newack will increase while
4773 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4774 		 * used for the CMT DAC algo. saw_newack will also change.
4775 		 */
4776 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4777 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4778 		    num_seg, num_nr_seg, &ecn_seg_sums)) {
4779 			wake_him++;
4780 		}
4781 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4782 			/*
4783 			 * validate the biggest_tsn_acked in the gap acks if
4784 			 * strict adherence is wanted.
4785 			 */
4786 			if ((biggest_tsn_acked == send_s) ||
4787 			    (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
4788 				/*
4789 				 * peer is either confused or we are under
4790 				 * attack. We must abort.
4791 				 */
4792 				SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4793 				    biggest_tsn_acked,
4794 				    send_s);
4795 
4796 				goto hopeless_peer;
4797 			}
4798 		}
4799 	}
4800 	/*******************************************/
4801 	/* cancel ALL T3-send timer if accum moved */
4802 	/*******************************************/
4803 	if (asoc->sctp_cmt_on_off == 1) {
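		/*
		 * With CMT the T3 timer is handled per destination: stop
		 * it only on nets where a new pseudo-cumack advanced.
		 */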
4804 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4805 			if (net->new_pseudo_cumack)
4806 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4807 				    stcb, net,
4808 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4809 
4810 		}
4811 	} else {
4812 		if (accum_moved) {
4813 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4814 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4815 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4816 			}
4817 		}
4818 	}
4819 	/********************************************/
4820 	/* drop the acked chunks from the sendqueue */
4821 	/********************************************/
4822 	asoc->last_acked_seq = cum_ack;
4823 
4824 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
4825 	if (tp1 == NULL)
4826 		goto done_with_it;
4827 	do {
4828 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
4829 		    MAX_TSN)) {
4830 			break;
4831 		}
4832 		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4833 			/* no more sent on list */
4834 			SCTP_PRINTF("Warning, tp1->sent == %d and it's now acked?\n",
4835 			    tp1->sent);
4836 		}
4837 		tp2 = TAILQ_NEXT(tp1, sctp_next);
4838 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4839 		if (tp1->pr_sctp_on) {
4840 			if (asoc->pr_sctp_cnt != 0)
4841 				asoc->pr_sctp_cnt--;
4842 		}
4843 		if (TAILQ_EMPTY(&asoc->sent_queue) &&
4844 		    (asoc->total_flight > 0)) {
4845 #ifdef INVARIANTS
4846 			panic("Warning flight size is positive and should be 0");
4847 #else
4848 			SCTP_PRINTF("Warning: flight size should be 0 but is %d\n",
4849 			    asoc->total_flight);
4850 #endif
4851 			asoc->total_flight = 0;
4852 		}
4853 		if (tp1->data) {
4854 			/* sa_ignore NO_NULL_CHK */
4855 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4856 			sctp_m_freem(tp1->data);
4857 			if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4858 				asoc->sent_queue_cnt_removeable--;
4859 			}
4860 		}
4861 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4862 			sctp_log_sack(asoc->last_acked_seq,
4863 			    cum_ack,
4864 			    tp1->rec.data.TSN_seq,
4865 			    0,
4866 			    0,
4867 			    SCTP_LOG_FREE_SENT);
4868 		}
4869 		tp1->data = NULL;
4870 		asoc->sent_queue_cnt--;
4871 		sctp_free_a_chunk(stcb, tp1);
4872 		wake_him++;
4873 		tp1 = tp2;
4874 	} while (tp1 != NULL);
4875 
4876 done_with_it:
4877 	/* sa_ignore NO_NULL_CHK */
4878 	if ((wake_him) && (stcb->sctp_socket)) {
4879 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4880 		struct socket *so;
4881 
4882 #endif
4883 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4884 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4885 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
4886 		}
4887 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4888 		so = SCTP_INP_SO(stcb->sctp_ep);
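		/*
		 * Take a reference on the assoc and drop the TCB lock so
		 * the socket lock can be taken first, then re-take the TCB
		 * lock. The assoc may have been freed while we were
		 * unlocked, hence the check below.
		 */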
4889 		atomic_add_int(&stcb->asoc.refcnt, 1);
4890 		SCTP_TCB_UNLOCK(stcb);
4891 		SCTP_SOCKET_LOCK(so, 1);
4892 		SCTP_TCB_LOCK(stcb);
4893 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4894 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4895 			/* assoc was freed while we were unlocked */
4896 			SCTP_SOCKET_UNLOCK(so, 1);
4897 			return;
4898 		}
4899 #endif
4900 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4901 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4902 		SCTP_SOCKET_UNLOCK(so, 1);
4903 #endif
4904 	} else {
4905 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4906 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
4907 		}
4908 	}
4909 
4910 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4911 		if (compare_with_wrap(asoc->last_acked_seq,
4912 		    asoc->fast_recovery_tsn, MAX_TSN) ||
4913 		    asoc->last_acked_seq == asoc->fast_recovery_tsn) {
4914 			/* Setup so we will exit RFC2582 fast recovery */
4915 			will_exit_fast_recovery = 1;
4916 		}
4917 	}
4918 	/*
4919 	 * Check for revoked fragments:
4920 	 *
4921 	 * If the previous SACK had no frags, then nothing can have been
4922 	 * revoked. If the previous SACK had frags and we now have frags
4923 	 * (num_seg > 0), call sctp_check_for_revoked() to tell if the
4924 	 * peer revoked some of them. Otherwise the peer revoked all ACKED
4925 	 * fragments, since we had some before and now we have NONE.
4926 	 */
4927 
4928 	if (num_seg)
4929 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4930 	else if (asoc->saw_sack_with_frags) {
4931 		int cnt_revoked = 0;
4932 
4933 		tp1 = TAILQ_FIRST(&asoc->sent_queue);
4934 		if (tp1 != NULL) {
4935 			/* Peer revoked all dg's marked or acked */
4936 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4937 				if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4938 					tp1->sent = SCTP_DATAGRAM_SENT;
4939 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4940 						sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4941 						    tp1->whoTo->flight_size,
4942 						    tp1->book_size,
4943 						    (uintptr_t) tp1->whoTo,
4944 						    tp1->rec.data.TSN_seq);
4945 					}
4946 					sctp_flight_size_increase(tp1);
4947 					sctp_total_flight_increase(stcb, tp1);
4948 					tp1->rec.data.chunk_was_revoked = 1;
4949 					/*
4950 					 * To ensure that this increase in
4951 					 * flightsize, which is artificial,
4952 					 * does not throttle the sender, we
4953 					 * also increase the cwnd
4954 					 * artificially.
4955 					 */
4956 					tp1->whoTo->cwnd += tp1->book_size;
4957 					cnt_revoked++;
4958 				}
4959 			}
4960 			if (cnt_revoked) {
4961 				reneged_all = 1;
4962 			}
4963 		}
4964 		asoc->saw_sack_with_frags = 0;
4965 	}
4966 	if (num_seg || num_nr_seg)
4967 		asoc->saw_sack_with_frags = 1;
4968 	else
4969 		asoc->saw_sack_with_frags = 0;
4970 
4971 	/* JRS - Use the congestion control given in the CC module */
4972 	asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4973 
4974 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4975 		/* nothing left in-flight */
4976 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4977 			/* stop all timers */
4978 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4979 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4980 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4981 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4982 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4983 				}
4984 			}
4985 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4986 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4987 			net->flight_size = 0;
4988 			net->partial_bytes_acked = 0;
4989 		}
4990 		asoc->total_flight = 0;
4991 		asoc->total_flight_count = 0;
4992 	}
4993 	/**********************************/
4994 	/* Now what about shutdown issues */
4995 	/**********************************/
4996 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4997 		/* nothing left on sendqueue.. consider done */
4998 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4999 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5000 			    asoc->peers_rwnd, 0, 0, a_rwnd);
5001 		}
5002 		asoc->peers_rwnd = a_rwnd;
5003 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5004 			/* SWS sender side engages */
5005 			asoc->peers_rwnd = 0;
5006 		}
5007 		/* clean up */
5008 		if ((asoc->stream_queue_cnt == 1) &&
5009 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5010 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5011 		    (asoc->locked_on_sending)
5012 		    ) {
5013 			struct sctp_stream_queue_pending *sp;
5014 
5015 			/*
5016 			 * I may be in a state where we got it all across... but
5017 			 * cannot write more due to a shutdown... we abort
5018 			 * since the user did not indicate EOR in this case.
5019 			 */
5020 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
5021 			    sctp_streamhead);
5022 			if ((sp) && (sp->length == 0)) {
5023 				asoc->locked_on_sending = NULL;
5024 				if (sp->msg_is_complete) {
5025 					asoc->stream_queue_cnt--;
5026 				} else {
5027 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
5028 					asoc->stream_queue_cnt--;
5029 				}
5030 			}
5031 		}
5032 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5033 		    (asoc->stream_queue_cnt == 0)) {
5034 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5035 				/* Need to abort here */
5036 				struct mbuf *oper;
5037 
5038 		abort_out_now:
5039 				*abort_now = 1;
5040 				/* XXX */
5041 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
5042 				    0, M_DONTWAIT, 1, MT_DATA);
5043 				if (oper) {
5044 					struct sctp_paramhdr *ph;
5045 					uint32_t *ippp;
5046 
5047 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5048 					    sizeof(uint32_t);
5049 					ph = mtod(oper, struct sctp_paramhdr *);
5050 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
5051 					ph->param_length = htons(SCTP_BUF_LEN(oper));
5052 					ippp = (uint32_t *) (ph + 1);
5053 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
5054 				}
5055 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
5056 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
5057 				return;
5058 			} else {
5059 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5060 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5061 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5062 				}
5063 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5064 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5065 				sctp_stop_timers_for_shutdown(stcb);
5066 				sctp_send_shutdown(stcb,
5067 				    stcb->asoc.primary_destination);
5068 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5069 				    stcb->sctp_ep, stcb, asoc->primary_destination);
5070 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5071 				    stcb->sctp_ep, stcb, asoc->primary_destination);
5072 			}
5073 			return;
5074 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5075 		    (asoc->stream_queue_cnt == 0)) {
5076 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5077 				goto abort_out_now;
5078 			}
5079 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5080 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5081 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5082 			sctp_send_shutdown_ack(stcb,
5083 			    stcb->asoc.primary_destination);
5084 			sctp_stop_timers_for_shutdown(stcb);
5085 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5086 			    stcb->sctp_ep, stcb, asoc->primary_destination);
5087 			return;
5088 		}
5089 	}
5090 	/*
5091 	 * Now here we are going to recycle net_ack for a different use...
5092 	 * HEADS UP.
5093 	 */
5094 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5095 		net->net_ack = 0;
5096 	}
5097 
5098 	/*
5099 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5100 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5101 	 * automatically ensure that.
5102 	 */
5103 	if ((asoc->sctp_cmt_on_off == 1) &&
5104 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5105 	    (cmt_dac_flag == 0)) {
5106 		this_sack_lowest_newack = cum_ack;
5107 	}
5108 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5109 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5110 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5111 	}
5112 	/* JRS - Use the congestion control given in the CC module */
5113 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5114 
5115 	/******************************************************************
5116 	 *  Here we do the stuff with ECN Nonce checking.
5117 	 *  We basically check to see if the nonce sum flag was incorrect
5118 	 *  or if resynchronization needs to be done. Also if we catch a
5119 	 *  misbehaving receiver we give him the kick.
5120 	 ******************************************************************/
5121 
5122 	if (asoc->ecn_nonce_allowed) {
5123 		if (asoc->nonce_sum_check) {
5124 			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
5125 				if (asoc->nonce_wait_for_ecne == 0) {
5126 					struct sctp_tmit_chunk *lchk;
5127 
5128 					lchk = TAILQ_FIRST(&asoc->send_queue);
5129 					asoc->nonce_wait_for_ecne = 1;
5130 					if (lchk) {
5131 						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
5132 					} else {
5133 						asoc->nonce_wait_tsn = asoc->sending_seq;
5134 					}
5135 				} else {
5136 					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
5137 					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
5138 						/*
5139 						 * Misbehaving peer. We need
5140 						 * to react to it.
5141 						 */
5142 						asoc->ecn_allowed = 0;
5143 						asoc->ecn_nonce_allowed = 0;
5144 					}
5145 				}
5146 			}
5147 		} else {
5148 			/* See if Resynchronization Possible */
5149 			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
5150 				asoc->nonce_sum_check = 1;
5151 				/*
5152 				 * now we must calculate what the base is.
5153 				 * We do this based on two things, we know
5154 				 * the totals for all the segments
5155 				 * gap-acked in the SACK; it's stored in
5156 				 * ecn_seg_sums. We also know the SACK's
5157 				 * nonce sum; it's in nonce_sum_flag. So we
5158 				 * can build a truth table to back-calculate
5159 				 * the new value of
5160 				 * asoc->nonce_sum_expect_base:
5161 				 *
5162 				 * SACK-flag-Value  Seg-Sums  Base
5163 				 *        0            0      0
5164 				 *        1            0      1
 				 *        0            1      1
 				 *        1            1      0
5165 				 */
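				/*
				 * I.e. flag = (base + sums) mod 2, so
				 * base = flag XOR sums, which is what is
				 * computed below.
				 */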
5166 				asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
5167 			}
5168 		}
5169 	}
5170 	/* Now are we exiting loss recovery ? */
5171 	if (will_exit_fast_recovery) {
5172 		/* Ok, we must exit fast recovery */
5173 		asoc->fast_retran_loss_recovery = 0;
5174 	}
5175 	if ((asoc->sat_t3_loss_recovery) &&
5176 	    ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
5177 	    MAX_TSN) ||
5178 	    (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
5179 		/* end satellite t3 loss recovery */
5180 		asoc->sat_t3_loss_recovery = 0;
5181 	}
5182 	/*
5183 	 * CMT Fast recovery
5184 	 */
5185 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5186 		if (net->will_exit_fast_recovery) {
5187 			/* Ok, we must exit fast recovery */
5188 			net->fast_retran_loss_recovery = 0;
5189 		}
5190 	}
5191 
5192 	/* Adjust and set the new rwnd value */
5193 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5194 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5195 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5196 	}
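	/*
	 * Usable peer window: the advertised a_rwnd less the bytes in
	 * flight, less a per-chunk overhead allowance
	 * (sctp_peer_chunk_oh) for each outstanding chunk.
	 */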
5197 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5198 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5199 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5200 		/* SWS sender side engages */
5201 		asoc->peers_rwnd = 0;
5202 	}
5203 	if (asoc->peers_rwnd > old_rwnd) {
5204 		win_probe_recovery = 1;
5205 	}
5206 	/*
5207 	 * Now we must setup so we have a timer up for anyone with
5208 	 * outstanding data.
5209 	 */
5210 	done_once = 0;
5211 again:
5212 	j = 0;
5213 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5214 		if (win_probe_recovery && (net->window_probe)) {
5215 			win_probe_recovered = 1;
5216 			/*-
5217 			 * Find first chunk that was used with
5218 			 * window probe and clear the event. Put
5219 			 * it back into the send queue as if it has
5220 			 * not been sent.
5221 			 */
5222 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5223 				if (tp1->window_probe) {
5224 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
5225 					break;
5226 				}
5227 			}
5228 		}
5229 		if (net->flight_size) {
5230 			j++;
5231 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5232 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5233 				    stcb->sctp_ep, stcb, net);
5234 			}
5235 			if (net->window_probe) {
5236 				net->window_probe = 0;
5237 			}
5238 		} else {
5239 			if (net->window_probe) {
5240 				/*
5241 				 * In window probes we must assure a timer
5242 				 * is still running there
5243 				 */
5244 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5245 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5246 					    stcb->sctp_ep, stcb, net);
5247 
5248 				}
5249 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5250 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5251 				    stcb, net,
5252 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5253 			}
5254 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5255 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5256 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5257 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5258 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
5259 				}
5260 			}
5261 		}
5262 	}
5263 	if ((j == 0) &&
5264 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5265 	    (asoc->sent_queue_retran_cnt == 0) &&
5266 	    (win_probe_recovered == 0) &&
5267 	    (done_once == 0)) {
5268 		/*
5269 		 * huh, this should not happen unless all packets are
5270 		 * PR-SCTP and marked to skip of course.
5271 		 */
5272 		if (sctp_fs_audit(asoc)) {
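			/*
			 * The flight-size audit flagged a mismatch: zero
			 * the counters and rebuild the flight sizes and
			 * retransmit count from the sent queue.
			 */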
5273 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5274 				net->flight_size = 0;
5275 			}
5276 			asoc->total_flight = 0;
5277 			asoc->total_flight_count = 0;
5278 			asoc->sent_queue_retran_cnt = 0;
5279 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5280 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5281 					sctp_flight_size_increase(tp1);
5282 					sctp_total_flight_increase(stcb, tp1);
5283 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5284 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5285 				}
5286 			}
5287 		}
5288 		done_once = 1;
5289 		goto again;
5290 	}
5291 	/*********************************************/
5292 	/* Here we perform PR-SCTP procedures        */
5293 	/* (section 4.2)                             */
5294 	/*********************************************/
5295 	/* C1. update advancedPeerAckPoint */
5296 	if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
5297 		asoc->advanced_peer_ack_point = cum_ack;
5298 	}
5299 	/* C2. try to further move advancedPeerAckPoint ahead */
5300 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5301 		struct sctp_tmit_chunk *lchk;
5302 		uint32_t old_adv_peer_ack_point;
5303 
5304 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5305 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5306 		/* C3. See if we need to send a Fwd-TSN */
5307 		if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
5308 		    MAX_TSN)) {
5309 			/*
5310 			 * ISSUE with ECN, see FWD-TSN processing for notes
5311 			 * on issues that will occur when the ECN NONCE
5312 			 * stuff is put into SCTP for cross checking.
5313 			 */
5314 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5315 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5316 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5317 				    old_adv_peer_ack_point);
5318 			}
5319 			if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
5320 			    MAX_TSN)) {
5321 
5322 				send_forward_tsn(stcb, asoc);
5323 				/*
5324 				 * ECN Nonce: Disable Nonce Sum check when
5325 				 * FWD TSN is sent and store resync tsn
5326 				 */
5327 				asoc->nonce_sum_check = 0;
5328 				asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
5329 			} else if (lchk) {
5330 				/* try to FR fwd-tsn's that get lost too */
5331 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5332 					send_forward_tsn(stcb, asoc);
5333 				}
5334 			}
5335 		}
5336 		if (lchk) {
5337 			/* Assure a timer is up */
5338 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5339 			    stcb->sctp_ep, stcb, lchk->whoTo);
5340 		}
5341 	}
5342 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5343 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5344 		    a_rwnd,
5345 		    stcb->asoc.peers_rwnd,
5346 		    stcb->asoc.total_flight,
5347 		    stcb->asoc.total_output_queue_size);
5348 	}
5349 }
5350 
5351 void
5352 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5353     struct sctp_nets *netp, int *abort_flag)
5354 {
5355 	/* Copy cum-ack */
5356 	uint32_t cum_ack, a_rwnd;
5357 
5358 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5359 	/* Arrange so a_rwnd does NOT change */
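	/*
	 * A SHUTDOWN carries no rwnd, so synthesize one; the sack handling
	 * subtracts the flight size back out, which leaves peers_rwnd as
	 * it was.
	 */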
5360 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5361 
5362 	/* Now call the express sack handling */
5363 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
5364 }
5365 
5366 static void
5367 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5368     struct sctp_stream_in *strmin)
5369 {
5370 	struct sctp_queued_to_read *ctl, *nctl;
5371 	struct sctp_association *asoc;
5372 	uint16_t tt;
5373 
5374 	asoc = &stcb->asoc;
5375 	tt = strmin->last_sequence_delivered;
5376 	/*
5377 	 * First deliver anything prior to and including the stream seq-no
5378 	 * that came in.
5379 	 */
5380 	ctl = TAILQ_FIRST(&strmin->inqueue);
5381 	while (ctl) {
5382 		nctl = TAILQ_NEXT(ctl, next);
5383 		if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
5384 		    (tt == ctl->sinfo_ssn)) {
5385 			/* this is deliverable now */
5386 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5387 			/* subtract pending on streams */
5388 			asoc->size_on_all_streams -= ctl->length;
5389 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5390 			/* deliver it to at least the delivery-q */
5391 			if (stcb->sctp_socket) {
5392 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5393 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5394 				    ctl,
5395 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5396 			}
5397 		} else {
5398 			/* no more delivery now. */
5399 			break;
5400 		}
5401 		ctl = nctl;
5402 	}
5403 	/*
5404 	 * now we must deliver things in the queue the normal way, if any are
5405 	 * now ready.
5406 	 */
5407 	tt = strmin->last_sequence_delivered + 1;
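	/*
	 * Unlike the pass above, only exact in-order matches are handed
	 * up here, advancing tt as each one is delivered.
	 */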
5408 	ctl = TAILQ_FIRST(&strmin->inqueue);
5409 	while (ctl) {
5410 		nctl = TAILQ_NEXT(ctl, next);
5411 		if (tt == ctl->sinfo_ssn) {
5412 			/* this is deliverable now */
5413 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5414 			/* subtract pending on streams */
5415 			asoc->size_on_all_streams -= ctl->length;
5416 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5417 			/* deliver it to at least the delivery-q */
5418 			strmin->last_sequence_delivered = ctl->sinfo_ssn;
5419 			if (stcb->sctp_socket) {
5420 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5421 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5422 				    ctl,
5423 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5424 
5425 			}
5426 			tt = strmin->last_sequence_delivered + 1;
5427 		} else {
5428 			break;
5429 		}
5430 		ctl = nctl;
5431 	}
5432 }
5433 
5434 static void
5435 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5436     struct sctp_association *asoc,
5437     uint16_t stream, uint16_t seq)
5438 {
5439 	struct sctp_tmit_chunk *chk, *at;
5440 
5441 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5442 		/* For each one on here see if we need to toss it */
5443 		/*
5444 		 * For now, large messages held on the reasmqueue that are
5445 		 * complete will be tossed too. We could in theory do more
5446 		 * work to spin through and stop after dumping one msg (aka
5447 		 * seeing the start of a new msg at the head), and call the
5448 		 * delivery function to see if it can be delivered... But
5449 		 * for now we just dump everything on the queue.
5450 		 */
5451 		chk = TAILQ_FIRST(&asoc->reasmqueue);
5452 		while (chk) {
5453 			at = TAILQ_NEXT(chk, sctp_next);
5454 			/*
5455 			 * Do not toss it if on a different stream or marked
5456 			 * for unordered delivery in which case the stream
5457 			 * sequence number has no meaning.
5458 			 */
5459 			if ((chk->rec.data.stream_number != stream) ||
5460 			    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5461 				chk = at;
5462 				continue;
5463 			}
5464 			if (chk->rec.data.stream_seq == seq) {
5465 				/* It needs to be tossed */
5466 				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5467 				if (compare_with_wrap(chk->rec.data.TSN_seq,
5468 				    asoc->tsn_last_delivered, MAX_TSN)) {
5469 					asoc->tsn_last_delivered =
5470 					    chk->rec.data.TSN_seq;
5471 					asoc->str_of_pdapi =
5472 					    chk->rec.data.stream_number;
5473 					asoc->ssn_of_pdapi =
5474 					    chk->rec.data.stream_seq;
5475 					asoc->fragment_flags =
5476 					    chk->rec.data.rcv_flags;
5477 				}
5478 				asoc->size_on_reasm_queue -= chk->send_size;
5479 				sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5480 
5481 				/* Clear up any stream problem */
5482 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5483 				    SCTP_DATA_UNORDERED &&
5484 				    (compare_with_wrap(chk->rec.data.stream_seq,
5485 				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
5486 				    MAX_SEQ))) {
5487 					/*
5488 					 * We must advance this stream's
5489 					 * sequence number if the chunk
5490 					 * being skipped is not unordered.
5491 					 * There is a chance that if the
5492 					 * peer does not include the last
5493 					 * fragment in its FWD-TSN we WILL
5494 					 * have a problem here, since a
5495 					 * partial chunk would be left in
5496 					 * the queue that may not be
5497 					 * deliverable. Also if a partial
5498 					 * delivery API has started, the
5499 					 * user may get a partial chunk and
5500 					 * the next read returns a new
5501 					 * chunk... really ugly, but I see
5502 					 * no way around it! Maybe a notify??
5503 					 */
5504 					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5505 					    chk->rec.data.stream_seq;
5506 				}
5507 				if (chk->data) {
5508 					sctp_m_freem(chk->data);
5509 					chk->data = NULL;
5510 				}
5511 				sctp_free_a_chunk(stcb, chk);
5512 			} else if (compare_with_wrap(chk->rec.data.stream_seq, seq, MAX_SEQ)) {
5513 				/*
5514 				 * If the stream_seq is > than the purging
5515 				 * one, we are done
5516 				 */
5517 				break;
5518 			}
5519 			chk = at;
5520 		}
5521 	}
5522 }
5523 
5524 
5525 void
5526 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5527     struct sctp_forward_tsn_chunk *fwd,
5528     int *abort_flag, struct mbuf *m, int offset)
5529 {
5530 	/*
5531 	 * ISSUES that MUST be fixed for ECN! When we are the sender of the
5532 	 * forward TSN, when the SACK comes back that acknowledges the
5533 	 * FWD-TSN we must reset the NONCE sum to match correctly. This will
5534 	 * get quite tricky since we may have sent more data intervening
5535 	 * and must carefully account for what the SACK says on the nonce
5536 	 * and any gaps that are reported. This work will NOT be done here,
5537 	 * but I note it here since it is really related to PR-SCTP and
5538 	 * FWD-TSN's
5539 	 */
5540 
5541 	/* The pr-sctp fwd tsn */
5542 	/*
5543 	 * here we will perform all the data receiver side steps for
5544 	 * processing FwdTSN, as required by the pr-sctp draft:
5545 	 *
5546 	 * Assume we get FwdTSN(x):
5547 	 *
5548 	 * 1) update local cumTSN to x
5549 	 * 2) try to further advance cumTSN to x + others we have
5550 	 * 3) examine and update re-ordering queue on pr-in-streams
5551 	 * 4) clean up re-assembly queue
 	 * 5) Send a sack to report where we are.
5552 	 */
5553 	struct sctp_association *asoc;
5554 	uint32_t new_cum_tsn, gap;
5555 	unsigned int i, fwd_sz, cumack_set_flag, m_size;
5556 	uint32_t str_seq;
5557 	struct sctp_stream_in *strm;
5558 	struct sctp_tmit_chunk *chk, *at;
5559 	struct sctp_queued_to_read *ctl, *sv;
5560 
5561 	cumack_set_flag = 0;
5562 	asoc = &stcb->asoc;
5563 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5564 		SCTPDBG(SCTP_DEBUG_INDATA1,
5565 		    "Bad size, too small fwd-tsn\n");
5566 		return;
5567 	}
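	/* mapping_array_size is in bytes, so m_size is the map size in bits */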
5568 	m_size = (stcb->asoc.mapping_array_size << 3);
5569 	/*************************************************************/
5570 	/* 1. Here we update local cumTSN and shift the bitmap array */
5571 	/*************************************************************/
5572 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5573 
5574 	if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
5575 	    asoc->cumulative_tsn == new_cum_tsn) {
5576 		/* Already got there ... */
5577 		return;
5578 	}
5579 	/*
5580 	 * now we know the new TSN is more advanced, let's find the actual
5581 	 * gap
5582 	 */
5583 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
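	/*
	 * gap is the wrap-aware distance from the base of the mapping
	 * array to the new cum-ack, i.e. the bit position it occupies in
	 * the map.
	 */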
5584 	asoc->cumulative_tsn = new_cum_tsn;
5585 	if (gap >= m_size) {
5586 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5587 			struct mbuf *oper;
5588 
5589 			/*
5590 			 * out of range (of single byte chunks in the rwnd I
5591 			 * give out). This must be an attacker.
5592 			 */
5593 			*abort_flag = 1;
5594 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5595 			    0, M_DONTWAIT, 1, MT_DATA);
5596 			if (oper) {
5597 				struct sctp_paramhdr *ph;
5598 				uint32_t *ippp;
5599 
5600 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5601 				    (sizeof(uint32_t) * 3);
5602 				ph = mtod(oper, struct sctp_paramhdr *);
5603 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5604 				ph->param_length = htons(SCTP_BUF_LEN(oper));
5605 				ippp = (uint32_t *) (ph + 1);
5606 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5607 				ippp++;
5608 				*ippp = asoc->highest_tsn_inside_map;
5609 				ippp++;
5610 				*ippp = new_cum_tsn;
5611 			}
5612 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5613 			sctp_abort_an_association(stcb->sctp_ep, stcb,
5614 			    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
5615 			return;
5616 		}
5617 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5618 
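		/*
		 * The new cum-ack lies beyond the end of the maps, so
		 * everything in them is acked: clear both maps and re-base
		 * them just past the new cum-ack.
		 */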
5619 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5620 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5621 		asoc->highest_tsn_inside_map = new_cum_tsn;
5622 
5623 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5624 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5625 
5626 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5627 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5628 		}
5629 		asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
5630 	} else {
5631 		SCTP_TCB_LOCK_ASSERT(stcb);
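		/*
		 * Walk the gap and mark every missing TSN up to the new
		 * cum-ack as received in the nr (non-renegable) map,
		 * tracking the highest TSN newly set.
		 */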
5632 		for (i = 0; i <= gap; i++) {
5633 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5634 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5635 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5636 				if (compare_with_wrap(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
5637 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5638 				}
5639 			}
5640 		}
5641 	}
5642 	/*************************************************************/
5643 	/* 2. Clear up re-assembly queue                             */
5644 	/*************************************************************/
5645 	/*
5646 	 * First service it if pd-api is up, just in case we can progress it
5647 	 * forward
5648 	 */
5649 	if (asoc->fragmented_delivery_inprogress) {
5650 		sctp_service_reassembly(stcb, asoc);
5651 	}
5652 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5653 		/* For each one on here see if we need to toss it */
5654 		/*
5655 		 * For now, large messages held on the reasmqueue that are
5656 		 * complete will be tossed too. We could in theory do more
5657 		 * work to spin through and stop after dumping one msg (aka
5658 		 * seeing the start of a new msg at the head), and call the
5659 		 * delivery function to see if it can be delivered... But
5660 		 * for now we just dump everything on the queue.
5661 		 */
5662 		chk = TAILQ_FIRST(&asoc->reasmqueue);
5663 		while (chk) {
5664 			at = TAILQ_NEXT(chk, sctp_next);
5665 			if ((compare_with_wrap(new_cum_tsn,
5666 			    chk->rec.data.TSN_seq, MAX_TSN)) ||
5667 			    (new_cum_tsn == chk->rec.data.TSN_seq)) {
5668 				/* It needs to be tossed */
5669 				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5670 				if (compare_with_wrap(chk->rec.data.TSN_seq,
5671 				    asoc->tsn_last_delivered, MAX_TSN)) {
5672 					asoc->tsn_last_delivered =
5673 					    chk->rec.data.TSN_seq;
5674 					asoc->str_of_pdapi =
5675 					    chk->rec.data.stream_number;
5676 					asoc->ssn_of_pdapi =
5677 					    chk->rec.data.stream_seq;
5678 					asoc->fragment_flags =
5679 					    chk->rec.data.rcv_flags;
5680 				}
5681 				asoc->size_on_reasm_queue -= chk->send_size;
5682 				sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5683 
5684 				/* Clear up any stream problem */
5685 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5686 				    SCTP_DATA_UNORDERED &&
5687 				    (compare_with_wrap(chk->rec.data.stream_seq,
5688 				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
5689 				    MAX_SEQ))) {
5690 					/*
5691 					 * We must advance this stream's
5692 					 * sequence number if the chunk
5693 					 * being skipped is not unordered.
5694 					 * There is a chance that if the
5695 					 * peer does not include the last
5696 					 * fragment in its FWD-TSN we WILL
5697 					 * have a problem here, since a
5698 					 * partial chunk would be left in
5699 					 * the queue that may not be
5700 					 * deliverable. Also if a partial
5701 					 * delivery API has started, the
5702 					 * user may get a partial chunk and
5703 					 * the next read returns a new
5704 					 * chunk... really ugly, but I see
5705 					 * no way around it! Maybe a notify??
5706 					 */
5707 					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5708 					    chk->rec.data.stream_seq;
5709 				}
5710 				if (chk->data) {
5711 					sctp_m_freem(chk->data);
5712 					chk->data = NULL;
5713 				}
5714 				sctp_free_a_chunk(stcb, chk);
5715 			} else {
5716 				/*
5717 				 * Ok we have gone beyond the end of the
5718 				 * fwd-tsn's mark.
5719 				 */
5720 				break;
5721 			}
5722 			chk = at;
5723 		}
5724 	}
5725 	/*******************************************************/
5726 	/* 3. Update the PR-stream re-ordering queues and fix  */
5727 	/* delivery issues as needed.                          */
5728 	/*******************************************************/
5729 	fwd_sz -= sizeof(*fwd);
5730 	if (m && fwd_sz) {
5731 		/* New method. */
5732 		unsigned int num_str;
5733 		struct sctp_strseq *stseq, strseqbuf;
5734 
5735 		offset += sizeof(*fwd);
5736 
5737 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5738 		num_str = fwd_sz / sizeof(struct sctp_strseq);
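		/*
		 * The rest of the chunk is an array of (stream, sequence)
		 * pairs naming the ordered messages being skipped.
		 */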
5739 		for (i = 0; i < num_str; i++) {
5740 			uint16_t st;
5741 
5742 			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5743 			    sizeof(struct sctp_strseq),
5744 			    (uint8_t *) & strseqbuf);
5745 			offset += sizeof(struct sctp_strseq);
5746 			if (stseq == NULL) {
5747 				break;
5748 			}
5749 			/* Convert */
5750 			st = ntohs(stseq->stream);
5751 			stseq->stream = st;
5752 			st = ntohs(stseq->sequence);
5753 			stseq->sequence = st;
5754 
5755 			/* now process */
5756 
5757 			/*
5758 			 * Ok we now look for the stream/seq on the read
5759 			 * queue where it's not all delivered. If we find it
5760 			 * we transmute the read entry into a PDI_ABORTED.
5761 			 */
5762 			if (stseq->stream >= asoc->streamincnt) {
5763 				/* screwed up streams, stop!  */
5764 				break;
5765 			}
5766 			if ((asoc->str_of_pdapi == stseq->stream) &&
5767 			    (asoc->ssn_of_pdapi == stseq->sequence)) {
5768 				/*
5769 				 * If this is the one we were partially
5770 				 * delivering now then we no longer are.
5771 				 * Note this will change with the reassembly
5772 				 * re-write.
5773 				 */
5774 				asoc->fragmented_delivery_inprogress = 0;
5775 			}
5776 			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5777 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5778 				if ((ctl->sinfo_stream == stseq->stream) &&
5779 				    (ctl->sinfo_ssn == stseq->sequence)) {
5780 					str_seq = (stseq->stream << 16) | stseq->sequence;
5781 					ctl->end_added = 1;
5782 					ctl->pdapi_aborted = 1;
5783 					sv = stcb->asoc.control_pdapi;
5784 					stcb->asoc.control_pdapi = ctl;
5785 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5786 					    stcb,
5787 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5788 					    (void *)&str_seq,
5789 					    SCTP_SO_NOT_LOCKED);
5790 					stcb->asoc.control_pdapi = sv;
5791 					break;
5792 				} else if ((ctl->sinfo_stream == stseq->stream) &&
5793 				    (compare_with_wrap(ctl->sinfo_ssn, stseq->sequence, MAX_SEQ))) {
5794 					/* We are past our victim SSN */
5795 					break;
5796 				}
5797 			}
5798 			strm = &asoc->strmin[stseq->stream];
5799 			if (compare_with_wrap(stseq->sequence,
5800 			    strm->last_sequence_delivered, MAX_SEQ)) {
5801 				/* Update the sequence number */
5802 				strm->last_sequence_delivered =
5803 				    stseq->sequence;
5804 			}
5805 			/* now kick the stream the new way */
5806 			/* sa_ignore NO_NULL_CHK */
5807 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5808 		}
5809 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5810 	}
5811 	/*
5812 	 * Now slide the mapping arrays forward.
5813 	 */
5814 	sctp_slide_mapping_arrays(stcb);
5815 
5816 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5817 		/* now lets kick out and check for more fragmented delivery */
5818 		/* sa_ignore NO_NULL_CHK */
5819 		sctp_deliver_reasm_check(stcb, &stcb->asoc);
5820 	}
5821 }
5822