xref: /freebsd/sys/netinet/sctp_indata.c (revision aa64588d28258aef88cc33b8043112e8856948d0)
/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $	 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>


/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it) for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a one-to-many (1-2-m)
	 * socket, since sb_cc is the count that everyone has put up. When
	 * we rewrite sctp_soreceive we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * take out what has NOT been put on the socket queue and that we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);

	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it would otherwise be 0, to keep SWS avoidance engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
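
/*
 * Illustrative example (numbers are hypothetical): with 128KB reported
 * free by sctp_sbspace(), 16KB held on the reassembly queue and 8KB on
 * the stream queues, the advertised window becomes 128KB - 16KB - 8KB
 * minus the rwnd control overhead. If that overhead would shrink the
 * window below itself, the window is pinned at 1 byte rather than 0 so
 * the peer keeps probing (SWS avoidance).
 */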

/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
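	/*
	 * Note: the DATA chunk flags ride in the upper byte of sinfo_flags;
	 * e.g. SCTP_DATA_UNORDERED (0x04) lines up with the user-visible
	 * SCTP_UNORDERED (0x0400) flag.
	 */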
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


/*
 * Build out our readq entry based on a chunk pulled from the reassembly
 * queue.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}

	ret = sctp_get_mbuf_for_msg(len,
	    0, M_DONTWAIT, 1, MT_DATA);

	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		memcpy(outinfo, sinfo, len);
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	SCTP_BUF_LEN(ret) = cmh->cmsg_len;
	return (ret);
}
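
/*
 * The next routine builds the same layout, but for callers that want a
 * plain malloc'd buffer rather than an mbuf: a cmsghdr immediately
 * followed by the (possibly extended) sndrcvinfo structure.
 */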
char *
sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
    int *control_len,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	char *buf;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}
	SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
	if (buf == NULL) {
		/* No space */
		return (buf);
	}
	/* We need a CMSG header followed by the struct */
	cmh = (struct cmsghdr *)buf;
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		memcpy(outinfo, sinfo, len);
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	*control_len = len;
	return (buf);
}

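/*
 * Move a TSN from the renegable mapping array over to the non-renegable
 * (nr) mapping array. Once a chunk has been handed to the socket buffer
 * we may no longer revoke it under memory pressure (drain), so its bit
 * must not remain eligible for reneging. If the moved TSN was the highest
 * in the renegable map, walk backwards to find the new highest.
 */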
static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (compare_with_wrap(cumackp1, tsn, MAX_TSN)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		printf("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
		panic("Things are really messed up now!!");
#endif
	}
	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; (compare_with_wrap(i, asoc->mapping_array_base_tsn, MAX_TSN) ||
		    (i == asoc->mapping_array_base_tsn)); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}


/*
 * We are delivering currently from the reassembly queue. We must continue
 * to deliver until we either: 1) run out of space, 2) run out of sequential
 * TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;

	struct sctp_queued_to_read *control, *ctl, *ctlat;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket above is long gone or going... */
abandon:
		asoc->fragmented_delivery_inprogress = 0;
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		while (chk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk);
			/* sa_ignore FREED_MEMORY */
			chk = TAILQ_FIRST(&asoc->reasmqueue);
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	do {
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		if (chk == NULL) {
			return;
		}
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next ordered sequence to deliver in its
			 * stream, and it is not unordered
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {

			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end,
			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong: either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is an EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was an EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");

#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* pull it off; we delivered it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now let's see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			ctl = TAILQ_FIRST(&strm->inqueue);
			if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
				while (ctl != NULL) {
					/* Deliver more if we can. */
					if (nxt_todel == ctl->sinfo_ssn) {
						ctlat = TAILQ_NEXT(ctl, next);
						TAILQ_REMOVE(&strm->inqueue, ctl, next);
						asoc->size_on_all_streams -= ctl->length;
						sctp_ucount_decr(asoc->cnt_on_all_streams);
						strm->last_sequence_delivered++;
						sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    ctl,
						    &stcb->sctp_socket->so_rcv, 1,
						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
						ctl = ctlat;
					} else {
						break;
					}
					nxt_todel = strm->last_sequence_delivered + 1;
				}
			}
			break;
		}
		/* sa_ignore FREED_MEMORY */
		chk = TAILQ_FIRST(&asoc->reasmqueue);
	} while (chk);
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go, OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains: what do we do when the socket buffer is FULL?
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 *
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *oper;

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream,
	    (uint32_t) strm->last_sequence_delivered,
	    (uint32_t) nxt_todel);
	if (compare_with_wrap(strm->last_sequence_delivered,
	    control->sinfo_ssn, MAX_SEQ) ||
	    (strm->last_sequence_delivered == control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (sizeof(uint32_t) * 3);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
			ippp++;
			*ippp = control->sinfo_tsn;
			ippp++;
			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

		*abort_flag = 1;
		return;

	}
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it won't be queued if it could be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;

		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
		control = TAILQ_FIRST(&strm->inqueue);
		while (control != NULL) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				at = TAILQ_NEXT(control, next);
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_NOT_LOCKED);
				control = at;
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if ((compare_with_wrap(asoc->cumulative_tsn,
		    control->sinfo_tsn, MAX_TSN)) ||
		    (control->sinfo_tsn == asoc->cumulative_tsn)) {
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (compare_with_wrap(at->sinfo_ssn,
				    control->sinfo_ssn, MAX_SEQ)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, he sent me a duplicate stream
					 * seq number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSNs have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh, for now just blow
					 * away the chunk!
					 */

					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom)
						sctp_free_remote_addr(control->whoFrom);
					control->whoFrom = NULL;
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}

/*
 * Returns two things: you get the total size of the deliverable parts of the
 * first fragmented message on the reassembly queue, and you get a 1 back if
 * all of the message is ready or a 0 back if the message is still incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	while (chk) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
		chk = TAILQ_NEXT(chk, sctp_next);
	}
	return (0);
}
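
/*
 * Example: a message fragmented as TSNs 10 (FIRST), 11 (MIDDLE) and 12
 * (LAST) at the head of the queue yields *t_size = the sum of the three
 * send_sizes and a return of 1; if TSN 12 has not arrived yet, the walk
 * falls off the end of the sequential run and returns 0.
 */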

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize, pd_point;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep, the first one is here and it's ok to
			 * deliver, but should we?
			 */
			if (stcb->sctp_socket) {
				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
				    stcb->sctp_ep->partial_delivery_point);
			} else {
				pd_point = stcb->sctp_ep->partial_delivery_point;
			}
			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {

				/*
				 * Yes, we set up to start reception by
				 * backing down the TSN, just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery... but it won't know to go
		 * back and call itself again... we do that here with the
		 * goto doit_again.
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our fragmented delivery; could more be
			 * waiting?
			 */
			goto doit_again;
		}
	}
}
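
/*
 * Note on pd_point above: partial delivery starts either when the whole
 * message is already on the reassembly queue, or when the deliverable
 * prefix has grown past the smaller of the socket's receive buffer limit
 * and the endpoint's configured partial_delivery_point.
 */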

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so pull it off (or as much as
 * we can). If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 */
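/*
 * The audits below enforce the fragment-adjacency invariants: if the chunk
 * directly before the new one (prev, at TSN-1) is a FIRST or MIDDLE, the
 * new chunk must be a MIDDLE or LAST; if prev is a LAST, the new chunk
 * must be a FIRST. Likewise, if the chunk directly after it (next, at
 * TSN+1) is a FIRST, the new chunk must be a LAST; if next is a MIDDLE or
 * LAST, the new chunk must be a FIRST or MIDDLE. Adjacent fragments of one
 * message must also agree on stream number and, for ordered data, stream
 * sequence; any violation aborts the association as a protocol error.
 */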
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
	u_char last_flags;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery in progress;
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);

				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (sizeof(uint32_t) * 3);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment NOT a FIRST
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (3 * sizeof(uint32_t));
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
					    chk->rec.data.stream_number,
					    asoc->str_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (sizeof(uint32_t) * 3);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
					    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
					    chk->rec.data.stream_seq,
					    asoc->ssn_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(at->rec.data.TSN_seq,
		    chk->rec.data.TSN_seq, MAX_TSN)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, he sent me a duplicate TSN */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSNs have wrapped. Maybe I should
			 * compare to TSN somehow... sigh, for now just blow
			 * away the chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk);
			return;
		} else {
			last_flags = at->rec.data.rcv_flags;
			last_tsn = at->rec.data.TSN_seq;
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok, the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    prev->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    prev->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok, the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_MIDDLE_FRAG ||
				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    next->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    next->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is not
 * stuffing us full of bad packet fragments. A broken peer could also do
 * this, but that is doubtful. It is too bad I must worry about evil
 * crackers, sigh :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(TSN_seq,
		    at->rec.data.TSN_seq, MAX_TSN)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok, this guy belongs next to a
					 * guy that is NOT last; it should
					 * be a middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since it's a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than the new chunk, but does
			 * it need to be right before it?
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, it better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}
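
/*
 * In other words, a return of 1 means the TSN would land immediately after
 * a non-LAST fragment, immediately before a non-FIRST fragment, or on top
 * of a TSN already queued, i.e. it falls inside an in-progress fragmented
 * message; a return of 0 means it can stand alone.
 */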

static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
	/* Process a data chunk */
	struct sctp_tmit_chunk *chk;
	uint32_t tsn, gap;
	struct mbuf *dmbuf;
	int indx, the_len;
	int need_reasm_check = 0;
	uint16_t strmno, strmseq;
	struct mbuf *oper;
	struct sctp_queued_to_read *control;
	int ordered;
	uint32_t protocol_id;
	uint8_t chunk_flags;
	struct sctp_stream_reset_list *liste;

	chk = NULL;
	tsn = ntohl(ch->dp.tsn);
	chunk_flags = ch->ch.chunk_flags;
	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
		asoc->send_sack = 1;
	}
	protocol_id = ch->dp.protocol_id;
	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
	}
	if (stcb == NULL) {
		return (0);
	}
	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
	if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
	    asoc->cumulative_tsn == tsn) {
		/* It is a duplicate */
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/* Calculate the number of TSN's between the base and this TSN */
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
		/* Can't hold the bit in the mapping at max array, toss it */
		return (0);
	}
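	/*
	 * The mapping array holds one bit per TSN, so the window it can
	 * track is SCTP_MAPPING_ARRAY * 8 TSNs past the base (4096 with a
	 * 512-byte array, which is what the wrap check further down also
	 * assumes).
	 */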
	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_expand_mapping_array(asoc, gap)) {
			/* Can't expand, drop it */
			return (0);
		}
	}
	if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
		*high_tsn = tsn;
	}
	/* See if we have received this one already */
	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/*
	 * Check to see about the GONE flag, duplicates would cause a sack
	 * to be sent up above
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
	    ) {
		/*
		 * wait a minute, this guy is gone, there is no longer a
		 * receiver. Send peer an ABORT!
		 */
		struct mbuf *op_err;

		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
	/*
	 * Now before going further we see if there is room. If NOT then we
	 * MAY let one through only IF this TSN is the one we are waiting
	 * for on a partial delivery API.
	 */
1549 
1550 	/* now do the tests */
1551 	if (((asoc->cnt_on_all_streams +
1552 	    asoc->cnt_on_reasm_queue +
1553 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1554 	    (((int)asoc->my_rwnd) <= 0)) {
1555 		/*
1556 		 * When we have NO room in the rwnd we check to make sure
1557 		 * the reader is doing its job...
1558 		 */
1559 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1560 			/* some to read, wake-up */
1561 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1562 			struct socket *so;
1563 
1564 			so = SCTP_INP_SO(stcb->sctp_ep);
1565 			atomic_add_int(&stcb->asoc.refcnt, 1);
1566 			SCTP_TCB_UNLOCK(stcb);
1567 			SCTP_SOCKET_LOCK(so, 1);
1568 			SCTP_TCB_LOCK(stcb);
1569 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1570 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1571 				/* assoc was freed while we were unlocked */
1572 				SCTP_SOCKET_UNLOCK(so, 1);
1573 				return (0);
1574 			}
1575 #endif
1576 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1577 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1578 			SCTP_SOCKET_UNLOCK(so, 1);
1579 #endif
1580 		}
1581 		/* now is it in the mapping array of what we have accepted? */
1582 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN) &&
1583 		    compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1584 			/* Nope, not in the valid range, dump it */
1585 			sctp_set_rwnd(stcb, asoc);
1586 			if ((asoc->cnt_on_all_streams +
1587 			    asoc->cnt_on_reasm_queue +
1588 			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1589 				SCTP_STAT_INCR(sctps_datadropchklmt);
1590 			} else {
1591 				SCTP_STAT_INCR(sctps_datadroprwnd);
1592 			}
1593 			indx = *break_flag;
1594 			*break_flag = 1;
1595 			return (0);
1596 		}
1597 	}
1598 	strmno = ntohs(ch->dp.stream_id);
1599 	if (strmno >= asoc->streamincnt) {
1600 		struct sctp_paramhdr *phdr;
1601 		struct mbuf *mb;
1602 
1603 		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1604 		    0, M_DONTWAIT, 1, MT_DATA);
1605 		if (mb != NULL) {
1606 			/* add some space up front so prepend will work well */
1607 			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1608 			phdr = mtod(mb, struct sctp_paramhdr *);
1609 			/*
1610 			 * Error causes are just params, and this one has two
1611 			 * back-to-back phdrs: one with the error type and
1612 			 * size, the other with the stream id and a reserved field.
1613 			 */
1614 			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1615 			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1616 			phdr->param_length =
1617 			    htons(sizeof(struct sctp_paramhdr) * 2);
1618 			phdr++;
1619 			/* We insert the stream in the type field */
1620 			phdr->param_type = ch->dp.stream_id;
1621 			/* And set the length to 0 for the rsvd field */
1622 			phdr->param_length = 0;
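			/*
			 * The cause built above, as it goes on the wire
			 * (8 bytes, two back-to-back paramhdrs):
			 *   [ type = INVALID_STREAM | length = 8 ]
			 *   [ stream_id (as received)  | reserved = 0 ]
			 */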
1623 			sctp_queue_op_err(stcb, mb);
1624 		}
1625 		SCTP_STAT_INCR(sctps_badsid);
1626 		SCTP_TCB_LOCK_ASSERT(stcb);
1627 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1628 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1629 			asoc->highest_tsn_inside_nr_map = tsn;
1630 		}
1631 		if (tsn == (asoc->cumulative_tsn + 1)) {
1632 			/* Update cum-ack */
1633 			asoc->cumulative_tsn = tsn;
1634 		}
1635 		return (0);
1636 	}
1637 	/*
1638 	 * Before we continue, let's validate that we are not being fooled by
1639 	 * an evil attacker. We can only have 4k chunks based on the TSN
1640 	 * spread allowed by the mapping array (512 * 8 bits), so there is no
1641 	 * way our stream sequence numbers could have wrapped. We of course
1642 	 * only validate the FIRST fragment, so the bit must be set.
1643 	 */
1644 	strmseq = ntohs(ch->dp.stream_sequence);
1645 #ifdef SCTP_ASOCLOG_OF_TSNS
1646 	SCTP_TCB_LOCK_ASSERT(stcb);
1647 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1648 		asoc->tsn_in_at = 0;
1649 		asoc->tsn_in_wrapped = 1;
1650 	}
1651 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1652 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1653 	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1654 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1655 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1656 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1657 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1658 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1659 	asoc->tsn_in_at++;
1660 #endif
1661 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1662 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1663 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1664 	    (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
1665 	    strmseq, MAX_SEQ) ||
1666 	    asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
1667 		/* The incoming sseq is behind where we last delivered? */
1668 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1669 		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1670 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1671 		    0, M_DONTWAIT, 1, MT_DATA);
1672 		if (oper) {
1673 			struct sctp_paramhdr *ph;
1674 			uint32_t *ippp;
1675 
1676 			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1677 			    (3 * sizeof(uint32_t));
1678 			ph = mtod(oper, struct sctp_paramhdr *);
1679 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1680 			ph->param_length = htons(SCTP_BUF_LEN(oper));
1681 			ippp = (uint32_t *) (ph + 1);
1682 			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1683 			ippp++;
1684 			*ippp = tsn;
1685 			ippp++;
1686 			*ippp = ((strmno << 16) | strmseq);
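			/*
			 * The cause just built is a paramhdr followed by
			 * three 32-bit words: the internal location code
			 * (SCTP_LOC_14), the offending TSN, and the stream
			 * number and sequence packed as (strmno << 16) |
			 * strmseq, all for debugging on the peer's side.
			 */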
1687 
1688 		}
1689 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1690 		sctp_abort_an_association(stcb->sctp_ep, stcb,
1691 		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1692 		*abort_flag = 1;
1693 		return (0);
1694 	}
1695 	/************************************
1696 	 * From here down we may find ch-> invalid,
1697 	 * so it's a good idea NOT to use it.
1698 	 *************************************/
1699 
1700 	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1701 	if (last_chunk == 0) {
1702 		dmbuf = SCTP_M_COPYM(*m,
1703 		    (offset + sizeof(struct sctp_data_chunk)),
1704 		    the_len, M_DONTWAIT);
1705 #ifdef SCTP_MBUF_LOGGING
1706 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1707 			struct mbuf *mat;
1708 
1709 			mat = dmbuf;
1710 			while (mat) {
1711 				if (SCTP_BUF_IS_EXTENDED(mat)) {
1712 					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1713 				}
1714 				mat = SCTP_BUF_NEXT(mat);
1715 			}
1716 		}
1717 #endif
1718 	} else {
1719 		/* We can steal the last chunk */
1720 		int l_len;
1721 
1722 		dmbuf = *m;
1723 		/* lop off the top part */
1724 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1725 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1726 			l_len = SCTP_BUF_LEN(dmbuf);
1727 		} else {
1728 			/*
1729 			 * need to count up the size; hopefully we do not
1730 			 * hit this too often :-0
1731 			 */
1732 			struct mbuf *lat;
1733 
1734 			l_len = 0;
1735 			lat = dmbuf;
1736 			while (lat) {
1737 				l_len += SCTP_BUF_LEN(lat);
1738 				lat = SCTP_BUF_NEXT(lat);
1739 			}
1740 		}
1741 		if (l_len > the_len) {
1742 			/* Trim the padding bytes off the end too */
1743 			m_adj(dmbuf, -(l_len - the_len));
1744 		}
1745 	}
1746 	if (dmbuf == NULL) {
1747 		SCTP_STAT_INCR(sctps_nomem);
1748 		return (0);
1749 	}
1750 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1751 	    asoc->fragmented_delivery_inprogress == 0 &&
1752 	    TAILQ_EMPTY(&asoc->resetHead) &&
1753 	    ((ordered == 0) ||
1754 	    ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1755 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1756 		/* Candidate for express delivery */
1757 		/*
1758 		 * It's not fragmented, no PD-API is up, nothing is in the
1759 		 * delivery queue; it's unordered, OR it's ordered and the next
1760 		 * to deliver AND nothing else is stuck on the stream queue;
1761 		 * and there is room for it in the socket buffer. Let's just
1762 		 * stuff it up the buffer....
1763 		 */
1764 
1765 		/* It would be nice to avoid this copy if we could :< */
1766 		sctp_alloc_a_readq(stcb, control);
1767 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1768 		    protocol_id,
1769 		    stcb->asoc.context,
1770 		    strmno, strmseq,
1771 		    chunk_flags,
1772 		    dmbuf);
1773 		if (control == NULL) {
1774 			goto failed_express_del;
1775 		}
1776 		sctp_add_to_readq(stcb->sctp_ep, stcb,
1777 		    control, &stcb->sctp_socket->so_rcv,
1778 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1779 
1780 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1781 			/* for ordered, bump what we delivered */
1782 			asoc->strmin[strmno].last_sequence_delivered++;
1783 		}
1784 		SCTP_STAT_INCR(sctps_recvexpress);
1785 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1786 			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1787 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1788 		}
1789 		control = NULL;
1790 
1791 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1792 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1793 			asoc->highest_tsn_inside_nr_map = tsn;
1794 		}
1795 		goto finish_express_del;
1796 	}
1797 failed_express_del:
1798 	/* If we reach here this is a new chunk */
1799 	chk = NULL;
1800 	control = NULL;
1801 	/* Express for fragmented delivery? */
1802 	if ((asoc->fragmented_delivery_inprogress) &&
1803 	    (stcb->asoc.control_pdapi) &&
1804 	    (asoc->str_of_pdapi == strmno) &&
1805 	    (asoc->ssn_of_pdapi == strmseq)
1806 	    ) {
1807 		control = stcb->asoc.control_pdapi;
1808 		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1809 			/* Can't be another first? */
1810 			goto failed_pdapi_express_del;
1811 		}
1812 		if (tsn == (control->sinfo_tsn + 1)) {
1813 			/* Yep, we can add it on */
1814 			int end = 0;
1815 			uint32_t cumack;
1816 
1817 			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1818 				end = 1;
1819 			}
1820 			cumack = asoc->cumulative_tsn;
1821 			if ((cumack + 1) == tsn)
1822 				cumack = tsn;
1823 
1824 			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1825 			    tsn,
1826 			    &stcb->sctp_socket->so_rcv)) {
1827 				SCTP_PRINTF("Append fails end:%d\n", end);
1828 				goto failed_pdapi_express_del;
1829 			}
1830 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1831 			if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1832 				asoc->highest_tsn_inside_nr_map = tsn;
1833 			}
1834 			SCTP_STAT_INCR(sctps_recvexpressm);
1835 			control->sinfo_tsn = tsn;
1836 			asoc->tsn_last_delivered = tsn;
1837 			asoc->fragment_flags = chunk_flags;
1838 			asoc->tsn_of_pdapi_last_delivered = tsn;
1839 			asoc->last_flags_delivered = chunk_flags;
1840 			asoc->last_strm_seq_delivered = strmseq;
1841 			asoc->last_strm_no_delivered = strmno;
1842 			if (end) {
1843 				/* clean up the flags and such */
1844 				asoc->fragmented_delivery_inprogress = 0;
1845 				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1846 					asoc->strmin[strmno].last_sequence_delivered++;
1847 				}
1848 				stcb->asoc.control_pdapi = NULL;
1849 				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1850 					/*
1851 					 * There could be another message
1852 					 * ready
1853 					 */
1854 					need_reasm_check = 1;
1855 				}
1856 			}
1857 			control = NULL;
1858 			goto finish_express_del;
1859 		}
1860 	}
1861 failed_pdapi_express_del:
1862 	control = NULL;
1863 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1864 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1865 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1866 			asoc->highest_tsn_inside_nr_map = tsn;
1867 		}
1868 	} else {
1869 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1870 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1871 			asoc->highest_tsn_inside_map = tsn;
1872 		}
1873 	}
1874 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1875 		sctp_alloc_a_chunk(stcb, chk);
1876 		if (chk == NULL) {
1877 			/* No memory so we drop the chunk */
1878 			SCTP_STAT_INCR(sctps_nomem);
1879 			if (last_chunk == 0) {
1880 				/* we copied it, free the copy */
1881 				sctp_m_freem(dmbuf);
1882 			}
1883 			return (0);
1884 		}
1885 		chk->rec.data.TSN_seq = tsn;
1886 		chk->no_fr_allowed = 0;
1887 		chk->rec.data.stream_seq = strmseq;
1888 		chk->rec.data.stream_number = strmno;
1889 		chk->rec.data.payloadtype = protocol_id;
1890 		chk->rec.data.context = stcb->asoc.context;
1891 		chk->rec.data.doing_fast_retransmit = 0;
1892 		chk->rec.data.rcv_flags = chunk_flags;
1893 		chk->asoc = asoc;
1894 		chk->send_size = the_len;
1895 		chk->whoTo = net;
1896 		atomic_add_int(&net->ref_count, 1);
1897 		chk->data = dmbuf;
1898 	} else {
1899 		sctp_alloc_a_readq(stcb, control);
1900 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1901 		    protocol_id,
1902 		    stcb->asoc.context,
1903 		    strmno, strmseq,
1904 		    chunk_flags,
1905 		    dmbuf);
1906 		if (control == NULL) {
1907 			/* No memory so we drop the chunk */
1908 			SCTP_STAT_INCR(sctps_nomem);
1909 			if (last_chunk == 0) {
1910 				/* we copied it, free the copy */
1911 				sctp_m_freem(dmbuf);
1912 			}
1913 			return (0);
1914 		}
1915 		control->length = the_len;
1916 	}
1917 
1918 	/* Mark it as received */
1919 	/* Now queue it where it belongs */
1920 	if (control != NULL) {
1921 		/* First a sanity check */
1922 		if (asoc->fragmented_delivery_inprogress) {
1923 			/*
1924 			 * Ok, we have a fragmented delivery in progress; if
1925 			 * this chunk is next to deliver OR belongs, in our
1926 			 * view, in the reassembly queue, the peer is evil
1927 			 * or broken.
1928 			 */
1929 			uint32_t estimate_tsn;
1930 
1931 			estimate_tsn = asoc->tsn_last_delivered + 1;
1932 			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1933 			    (estimate_tsn == control->sinfo_tsn)) {
1934 				/* Evil/Broke peer */
1935 				sctp_m_freem(control->data);
1936 				control->data = NULL;
1937 				if (control->whoFrom) {
1938 					sctp_free_remote_addr(control->whoFrom);
1939 					control->whoFrom = NULL;
1940 				}
1941 				sctp_free_a_readq(stcb, control);
1942 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1943 				    0, M_DONTWAIT, 1, MT_DATA);
1944 				if (oper) {
1945 					struct sctp_paramhdr *ph;
1946 					uint32_t *ippp;
1947 
1948 					SCTP_BUF_LEN(oper) =
1949 					    sizeof(struct sctp_paramhdr) +
1950 					    (3 * sizeof(uint32_t));
1951 					ph = mtod(oper, struct sctp_paramhdr *);
1952 					ph->param_type =
1953 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1954 					ph->param_length = htons(SCTP_BUF_LEN(oper));
1955 					ippp = (uint32_t *) (ph + 1);
1956 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1957 					ippp++;
1958 					*ippp = tsn;
1959 					ippp++;
1960 					*ippp = ((strmno << 16) | strmseq);
1961 				}
1962 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1963 				sctp_abort_an_association(stcb->sctp_ep, stcb,
1964 				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1965 
1966 				*abort_flag = 1;
1967 				return (0);
1968 			} else {
1969 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1970 					sctp_m_freem(control->data);
1971 					control->data = NULL;
1972 					if (control->whoFrom) {
1973 						sctp_free_remote_addr(control->whoFrom);
1974 						control->whoFrom = NULL;
1975 					}
1976 					sctp_free_a_readq(stcb, control);
1977 
1978 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1979 					    0, M_DONTWAIT, 1, MT_DATA);
1980 					if (oper) {
1981 						struct sctp_paramhdr *ph;
1982 						uint32_t *ippp;
1983 
1984 						SCTP_BUF_LEN(oper) =
1985 						    sizeof(struct sctp_paramhdr) +
1986 						    (3 * sizeof(uint32_t));
1987 						ph = mtod(oper,
1988 						    struct sctp_paramhdr *);
1989 						ph->param_type =
1990 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1991 						ph->param_length =
1992 						    htons(SCTP_BUF_LEN(oper));
1993 						ippp = (uint32_t *) (ph + 1);
1994 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1995 						ippp++;
1996 						*ippp = tsn;
1997 						ippp++;
1998 						*ippp = ((strmno << 16) | strmseq);
1999 					}
2000 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2001 					sctp_abort_an_association(stcb->sctp_ep,
2002 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2003 
2004 					*abort_flag = 1;
2005 					return (0);
2006 				}
2007 			}
2008 		} else {
2009 			/* No PDAPI running */
2010 			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
2011 				/*
2012 				 * The reassembly queue is NOT empty; validate
2013 				 * that this TSN does not need to be in the
2014 				 * reassembly queue. If it does, then our peer
2015 				 * is broken or evil.
2016 				 */
2017 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2018 					sctp_m_freem(control->data);
2019 					control->data = NULL;
2020 					if (control->whoFrom) {
2021 						sctp_free_remote_addr(control->whoFrom);
2022 						control->whoFrom = NULL;
2023 					}
2024 					sctp_free_a_readq(stcb, control);
2025 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2026 					    0, M_DONTWAIT, 1, MT_DATA);
2027 					if (oper) {
2028 						struct sctp_paramhdr *ph;
2029 						uint32_t *ippp;
2030 
2031 						SCTP_BUF_LEN(oper) =
2032 						    sizeof(struct sctp_paramhdr) +
2033 						    (3 * sizeof(uint32_t));
2034 						ph = mtod(oper,
2035 						    struct sctp_paramhdr *);
2036 						ph->param_type =
2037 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2038 						ph->param_length =
2039 						    htons(SCTP_BUF_LEN(oper));
2040 						ippp = (uint32_t *) (ph + 1);
2041 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2042 						ippp++;
2043 						*ippp = tsn;
2044 						ippp++;
2045 						*ippp = ((strmno << 16) | strmseq);
2046 					}
2047 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2048 					sctp_abort_an_association(stcb->sctp_ep,
2049 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2050 
2051 					*abort_flag = 1;
2052 					return (0);
2053 				}
2054 			}
2055 		}
2056 		/* ok, if we reach here we have passed the sanity checks */
2057 		if (chunk_flags & SCTP_DATA_UNORDERED) {
2058 			/* queue directly into socket buffer */
2059 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2060 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2061 			    control,
2062 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2063 		} else {
2064 			/*
2065 			 * Special check for when streams are resetting. We
2066 			 * could be smarter about this and check the
2067 			 * actual stream to see if it is not being reset..
2068 			 * that way we would not create a HOLB when amongst
2069 			 * streams being reset and those not being reset.
2070 			 *
2071 			 * We take complete messages that have a stream reset
2072 			 * intervening (aka the TSN is after where our
2073 			 * cum-ack needs to be) off and put them on a
2074 			 * pending_reply_queue. The reassembly ones we do
2075 			 * not have to worry about since they are all sorted
2076 			 * and processed by TSN order. It is only the
2077 			 * singletons I must worry about.
2078 			 */
2079 			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2080 			    ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)))
2081 			    ) {
2082 				/*
2083 				 * yep, it's past where we need to reset... go
2084 				 * ahead and queue it.
2085 				 */
2086 				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2087 					/* first one on */
2088 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2089 				} else {
2090 					struct sctp_queued_to_read *ctlOn;
2091 					unsigned char inserted = 0;
2092 
2093 					ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
2094 					while (ctlOn) {
2095 						if (compare_with_wrap(control->sinfo_tsn,
2096 						    ctlOn->sinfo_tsn, MAX_TSN)) {
2097 							ctlOn = TAILQ_NEXT(ctlOn, next);
2098 						} else {
2099 							/* found it */
2100 							TAILQ_INSERT_BEFORE(ctlOn, control, next);
2101 							inserted = 1;
2102 							break;
2103 						}
2104 					}
2105 					if (inserted == 0) {
2106 						/*
2107 						 * did not fit before any
2108 						 * existing entry, so it
2109 						 * must go at the end.
2110 						 */
2111 						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2112 					}
2113 				}
2114 			} else {
2115 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2116 				if (*abort_flag) {
2117 					return (0);
2118 				}
2119 			}
2120 		}
2121 	} else {
2122 		/* Into the re-assembly queue */
2123 		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2124 		if (*abort_flag) {
2125 			/*
2126 			 * the assoc is now gone and chk was put onto the
2127 			 * reasm queue, which has all been freed.
2128 			 */
2129 			*m = NULL;
2130 			return (0);
2131 		}
2132 	}
2133 finish_express_del:
2134 	if (tsn == (asoc->cumulative_tsn + 1)) {
2135 		/* Update cum-ack */
2136 		asoc->cumulative_tsn = tsn;
2137 	}
2138 	if (last_chunk) {
2139 		*m = NULL;
2140 	}
2141 	if (ordered) {
2142 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2143 	} else {
2144 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2145 	}
2146 	SCTP_STAT_INCR(sctps_recvdata);
2147 	/* Set it present please */
2148 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2149 		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2150 	}
2151 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2152 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2153 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2154 	}
2155 	/* check the special flag for stream resets */
2156 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2157 	    ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
2158 	    (asoc->cumulative_tsn == liste->tsn))
2159 	    ) {
2160 		/*
2161 		 * we have finished working through the backlogged TSNs; now it
2162 		 * is time to reset streams. 1: call the reset function. 2: free
2163 		 * pending_reply space. 3: distribute any chunks in the
2164 		 * pending_reply_queue.
2165 		 */
2166 		struct sctp_queued_to_read *ctl;
2167 
2168 		sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2169 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2170 		SCTP_FREE(liste, SCTP_M_STRESET);
2171 		/* sa_ignore FREED_MEMORY */
2172 		liste = TAILQ_FIRST(&asoc->resetHead);
2173 		ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2174 		if (ctl && (liste == NULL)) {
2175 			/* All can be removed */
2176 			while (ctl) {
2177 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2178 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2179 				if (*abort_flag) {
2180 					return (0);
2181 				}
2182 				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2183 			}
2184 		} else if (ctl) {
2185 			/* more than one in queue */
2186 			while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
2187 				/*
2188 				 * if ctl->sinfo_tsn is <= liste->tsn we can
2189 				 * process it which is the NOT of
2190 				 * ctl->sinfo_tsn > liste->tsn
2191 				 */
2192 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2193 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2194 				if (*abort_flag) {
2195 					return (0);
2196 				}
2197 				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2198 			}
2199 		}
2200 		/*
2201 		 * Now service reassembly to pick up anything that has been
2202 		 * held on the reassembly queue.
2203 		 */
2204 		sctp_deliver_reasm_check(stcb, asoc);
2205 		need_reasm_check = 0;
2206 	}
2207 	if (need_reasm_check) {
2208 		/* Another one waits ? */
2209 		sctp_deliver_reasm_check(stcb, asoc);
2210 	}
2211 	return (1);
2212 }
2213 
2214 int8_t sctp_map_lookup_tab[256] = {
2215 	0, 1, 0, 2, 0, 1, 0, 3,
2216 	0, 1, 0, 2, 0, 1, 0, 4,
2217 	0, 1, 0, 2, 0, 1, 0, 3,
2218 	0, 1, 0, 2, 0, 1, 0, 5,
2219 	0, 1, 0, 2, 0, 1, 0, 3,
2220 	0, 1, 0, 2, 0, 1, 0, 4,
2221 	0, 1, 0, 2, 0, 1, 0, 3,
2222 	0, 1, 0, 2, 0, 1, 0, 6,
2223 	0, 1, 0, 2, 0, 1, 0, 3,
2224 	0, 1, 0, 2, 0, 1, 0, 4,
2225 	0, 1, 0, 2, 0, 1, 0, 3,
2226 	0, 1, 0, 2, 0, 1, 0, 5,
2227 	0, 1, 0, 2, 0, 1, 0, 3,
2228 	0, 1, 0, 2, 0, 1, 0, 4,
2229 	0, 1, 0, 2, 0, 1, 0, 3,
2230 	0, 1, 0, 2, 0, 1, 0, 7,
2231 	0, 1, 0, 2, 0, 1, 0, 3,
2232 	0, 1, 0, 2, 0, 1, 0, 4,
2233 	0, 1, 0, 2, 0, 1, 0, 3,
2234 	0, 1, 0, 2, 0, 1, 0, 5,
2235 	0, 1, 0, 2, 0, 1, 0, 3,
2236 	0, 1, 0, 2, 0, 1, 0, 4,
2237 	0, 1, 0, 2, 0, 1, 0, 3,
2238 	0, 1, 0, 2, 0, 1, 0, 6,
2239 	0, 1, 0, 2, 0, 1, 0, 3,
2240 	0, 1, 0, 2, 0, 1, 0, 4,
2241 	0, 1, 0, 2, 0, 1, 0, 3,
2242 	0, 1, 0, 2, 0, 1, 0, 5,
2243 	0, 1, 0, 2, 0, 1, 0, 3,
2244 	0, 1, 0, 2, 0, 1, 0, 4,
2245 	0, 1, 0, 2, 0, 1, 0, 3,
2246 	0, 1, 0, 2, 0, 1, 0, 8
2247 };
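/*
 * A reading aid for the table above: sctp_map_lookup_tab[v] is the number
 * of consecutive 1 bits in v counting up from bit 0, i.e. the bit offset
 * of the first missing TSN within one mapping array byte. For example,
 * sctp_map_lookup_tab[0x3f] == 6 (bits 0-5 set, bit 6 clear) and
 * sctp_map_lookup_tab[0x00] == 0. A byte of 0xff never consults the
 * table; the scan below just adds 8 and moves to the next byte.
 */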
2248 
2249 
2250 void
2251 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2252 {
2253 	/*
2254 	 * Now we also need to check the mapping array in a couple of ways.
2255 	 * 1) Did we move the cum-ack point?
2256 	 *
2257 	 * When you first glance at this you might think that all entries that
2258 	 * make up the position of the cum-ack would be in the nr-mapping
2259 	 * array only.. i.e. things up to the cum-ack are always
2260 	 * deliverable. That's true with one exception: when it's a fragmented
2261 	 * message, we may not deliver the data until some threshold (or all
2262 	 * of it) is in place. So we must OR the nr_mapping_array and
2263 	 * mapping_array to get a true picture of the cum-ack.
2264 	 */
2265 	struct sctp_association *asoc;
2266 	int at;
2267 	uint8_t val;
2268 	int slide_from, slide_end, lgap, distance;
2269 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2270 	int type;
2271 
2272 	asoc = &stcb->asoc;
2273 	at = 0;
2274 
2275 	old_cumack = asoc->cumulative_tsn;
2276 	old_base = asoc->mapping_array_base_tsn;
2277 	old_highest = asoc->highest_tsn_inside_map;
2278 	/*
2279 	 * We could probably improve this a small bit by calculating the
2280 	 * offset of the current cum-ack as the starting point.
2281 	 */
2282 	if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
2283 	    stcb->asoc.peer_supports_nr_sack) {
2284 		type = SCTP_NR_SELECTIVE_ACK;
2285 	} else {
2286 		type = SCTP_SELECTIVE_ACK;
2287 	}
2288 	at = 0;
2289 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2290 		if (type == SCTP_NR_SELECTIVE_ACK)
2291 			val = asoc->nr_mapping_array[slide_from];
2292 		else
2293 			val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2294 		if (val == 0xff) {
2295 			at += 8;
2296 		} else {
2297 			/* there is a 0 bit */
2298 			at += sctp_map_lookup_tab[val];
2299 			break;
2300 		}
2301 	}
2302 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
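	/*
	 * For illustration: if the combined map bytes are ff ff 3f ...,
	 * the scan above accumulates at = 8 + 8 + 6 = 22, so the cum-ack
	 * lands on mapping_array_base_tsn + 21, the last TSN of the
	 * contiguous run.
	 */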
2303 
2304 	if (compare_with_wrap(asoc->cumulative_tsn, asoc->highest_tsn_inside_map, MAX_TSN) &&
2305 	    compare_with_wrap(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
2306 #ifdef INVARIANTS
2307 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2308 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2309 #else
2310 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2311 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2312 		sctp_print_mapping_array(asoc);
2313 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2314 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2315 		}
2316 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2317 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2318 #endif
2319 	}
2320 	if (compare_with_wrap(asoc->highest_tsn_inside_nr_map,
2321 	    asoc->highest_tsn_inside_map,
2322 	    MAX_TSN)) {
2323 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2324 	} else {
2325 		highest_tsn = asoc->highest_tsn_inside_map;
2326 	}
2327 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2328 		/* The complete array was completed by a single FR */
2329 		/* highest becomes the cum-ack */
2330 		int clr;
2331 
2332 #ifdef INVARIANTS
2333 		unsigned int i;
2334 
2335 #endif
2336 
2337 		/* clear the array */
2338 		clr = ((at + 7) >> 3);
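		/*
		 * clr rounds the bit count up to whole bytes, e.g. at = 22
		 * bits gives clr = 3 bytes to wipe.
		 */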
2339 		if (clr > asoc->mapping_array_size) {
2340 			clr = asoc->mapping_array_size;
2341 		}
2342 		memset(asoc->mapping_array, 0, clr);
2343 		memset(asoc->nr_mapping_array, 0, clr);
2344 #ifdef INVARIANTS
2345 		for (i = 0; i < asoc->mapping_array_size; i++) {
2346 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2347 				printf("Error, mapping arrays not clean at clear\n");
2348 				sctp_print_mapping_array(asoc);
2349 			}
2350 		}
2351 #endif
2352 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2353 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2354 	} else if (at >= 8) {
2355 		/* we can slide the mapping array down */
2356 		/* slide_from holds where we hit the first NON 0xff byte */
2357 
2358 		/*
2359 		 * now calculate the ceiling of the move using our highest
2360 		 * TSN value
2361 		 */
2362 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2363 		slide_end = (lgap >> 3);
2364 		if (slide_end < slide_from) {
2365 			sctp_print_mapping_array(asoc);
2366 #ifdef INVARIANTS
2367 			panic("impossible slide");
2368 #else
2369 			printf("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2370 			    lgap, slide_end, slide_from, at);
2371 			return;
2372 #endif
2373 		}
2374 		if (slide_end > asoc->mapping_array_size) {
2375 #ifdef INVARIANTS
2376 			panic("would overrun buffer");
2377 #else
2378 			printf("Gak, would have overrun map end:%d slide_end:%d\n",
2379 			    asoc->mapping_array_size, slide_end);
2380 			slide_end = asoc->mapping_array_size;
2381 #endif
2382 		}
2383 		distance = (slide_end - slide_from) + 1;
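		/*
		 * For illustration: with slide_from = 3 (the first byte
		 * holding a hole) and slide_end = 6 (the byte holding the
		 * highest TSN), distance = 4 bytes are copied down to the
		 * front below and mapping_array_base_tsn advances by
		 * 3 * 8 = 24 TSNs.
		 */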
2384 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2385 			sctp_log_map(old_base, old_cumack, old_highest,
2386 			    SCTP_MAP_PREPARE_SLIDE);
2387 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2388 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2389 		}
2390 		if (distance + slide_from > asoc->mapping_array_size ||
2391 		    distance < 0) {
2392 			/*
2393 			 * Here we do NOT slide forward the array so that
2394 			 * hopefully when more data comes in to fill it up
2395 			 * we will be able to slide it forward. Really I
2396 			 * don't think this should happen :-0
2397 			 */
2398 
2399 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2400 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2401 				    (uint32_t) asoc->mapping_array_size,
2402 				    SCTP_MAP_SLIDE_NONE);
2403 			}
2404 		} else {
2405 			int ii;
2406 
2407 			for (ii = 0; ii < distance; ii++) {
2408 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2409 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2410 
2411 			}
2412 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2413 				asoc->mapping_array[ii] = 0;
2414 				asoc->nr_mapping_array[ii] = 0;
2415 			}
2416 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2417 				asoc->highest_tsn_inside_map += (slide_from << 3);
2418 			}
2419 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2420 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2421 			}
2422 			asoc->mapping_array_base_tsn += (slide_from << 3);
2423 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2424 				sctp_log_map(asoc->mapping_array_base_tsn,
2425 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2426 				    SCTP_MAP_SLIDE_RESULT);
2427 			}
2428 		}
2429 	}
2430 }
2431 
2432 
2433 void
2434 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap, int *abort_flag)
2435 {
2436 	struct sctp_association *asoc;
2437 	uint32_t highest_tsn;
2438 
2439 	asoc = &stcb->asoc;
2440 	if (compare_with_wrap(asoc->highest_tsn_inside_nr_map,
2441 	    asoc->highest_tsn_inside_map,
2442 	    MAX_TSN)) {
2443 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2444 	} else {
2445 		highest_tsn = asoc->highest_tsn_inside_map;
2446 	}
2447 
2448 	/*
2449 	 * Now we need to see if we need to queue a sack or just start the
2450 	 * timer (if allowed).
2451 	 */
2452 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2453 		/*
2454 		 * Ok, special case: in the SHUTDOWN-SENT state, here we make
2455 		 * sure the SACK timer is off and instead send a SHUTDOWN and
2456 		 * a SACK
2457 		 */
2458 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2459 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2460 			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2461 		}
2462 		sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2463 		sctp_send_sack(stcb);
2464 	} else {
2465 		int is_a_gap;
2466 
2467 		/* is there a gap now ? */
2468 		is_a_gap = compare_with_wrap(highest_tsn, stcb->asoc.cumulative_tsn, MAX_TSN);
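		/*
		 * E.g. a cum-ack of 100 with a highest received TSN of 103
		 * means TSN 101 is missing, so gap reports are called for.
		 */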
2469 
2470 		/*
2471 		 * CMT DAC algorithm: increase number of packets received
2472 		 * since last ack
2473 		 */
2474 		stcb->asoc.cmt_dac_pkts_rcvd++;
2475 
2476 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2477 							 * SACK */
2478 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2479 							 * longer is one */
2480 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2481 		    (is_a_gap) ||	/* is still a gap */
2482 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2483 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2484 		    ) {
2485 
2486 			if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off)) &&
2487 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2488 			    (stcb->asoc.send_sack == 0) &&
2489 			    (stcb->asoc.numduptsns == 0) &&
2490 			    (stcb->asoc.delayed_ack) &&
2491 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2492 
2493 				/*
2494 				 * CMT DAC algorithm: with CMT, delay acks
2495 				 * even in the face of reordering.
2496 				 *
2497 				 * Therefore, acks that do not have to be
2498 				 * sent for the above reasons will be
2499 				 * delayed. That is, acks that would have
2500 				 * been sent due to gap reports will be
2501 				 * delayed with DAC. Start the delayed ack
2502 				 * timer.
2503 				 */
2504 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2505 				    stcb->sctp_ep, stcb, NULL);
2506 			} else {
2507 				/*
2508 				 * Ok we must build a SACK since the timer
2509 				 * is pending, we got our first packet OR
2510 				 * there are gaps or duplicates.
2511 				 */
2512 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2513 				sctp_send_sack(stcb);
2514 			}
2515 		} else {
2516 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2517 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2518 				    stcb->sctp_ep, stcb, NULL);
2519 			}
2520 		}
2521 	}
2522 }
2523 
2524 void
2525 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2526 {
2527 	struct sctp_tmit_chunk *chk;
2528 	uint32_t tsize, pd_point;
2529 	uint16_t nxt_todel;
2530 
2531 	if (asoc->fragmented_delivery_inprogress) {
2532 		sctp_service_reassembly(stcb, asoc);
2533 	}
2534 	/* Can we proceed further, i.e. the PD-API is complete */
2535 	if (asoc->fragmented_delivery_inprogress) {
2536 		/* no */
2537 		return;
2538 	}
2539 	/*
2540 	 * Now, is there some other chunk I can deliver from the reassembly
2541 	 * queue?
2542 	 */
2543 doit_again:
2544 	chk = TAILQ_FIRST(&asoc->reasmqueue);
2545 	if (chk == NULL) {
2546 		asoc->size_on_reasm_queue = 0;
2547 		asoc->cnt_on_reasm_queue = 0;
2548 		return;
2549 	}
2550 	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2551 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2552 	    ((nxt_todel == chk->rec.data.stream_seq) ||
2553 	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2554 		/*
2555 		 * Yep, the first one is here. We set up to start reception
2556 		 * by backing down the TSN, just in case we can't deliver.
2557 		 */
2558 
2559 		/*
2560 		 * Before we start, though, either all of the message should
2561 		 * be here or at least enough of it to reach the partial
2562 		 * delivery point, so that something can be delivered.
2563 		 */
2564 		if (stcb->sctp_socket) {
2565 			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
2566 			    stcb->sctp_ep->partial_delivery_point);
2567 		} else {
2568 			pd_point = stcb->sctp_ep->partial_delivery_point;
2569 		}
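		/*
		 * For illustration (hypothetical values): with a 64k
		 * receive buffer and a partial_delivery_point of 4k,
		 * pd_point is 4k; a partly assembled 3k message keeps
		 * waiting, while 5k of leading fragments starts the
		 * PD-API below.
		 */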
2570 		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2571 			asoc->fragmented_delivery_inprogress = 1;
2572 			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2573 			asoc->str_of_pdapi = chk->rec.data.stream_number;
2574 			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2575 			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2576 			asoc->fragment_flags = chk->rec.data.rcv_flags;
2577 			sctp_service_reassembly(stcb, asoc);
2578 			if (asoc->fragmented_delivery_inprogress == 0) {
2579 				goto doit_again;
2580 			}
2581 		}
2582 	}
2583 }
2584 
2585 int
2586 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2587     struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2588     struct sctp_nets *net, uint32_t * high_tsn)
2589 {
2590 	struct sctp_data_chunk *ch, chunk_buf;
2591 	struct sctp_association *asoc;
2592 	int num_chunks = 0;	/* number of DATA chunks processed */
2593 	int stop_proc = 0;
2594 	int chk_length, break_flag, last_chunk;
2595 	int abort_flag = 0, was_a_gap = 0;
2596 	struct mbuf *m;
2597 
2598 	/* set the rwnd */
2599 	sctp_set_rwnd(stcb, &stcb->asoc);
2600 
2601 	m = *mm;
2602 	SCTP_TCB_LOCK_ASSERT(stcb);
2603 	asoc = &stcb->asoc;
2604 	if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2605 	    stcb->asoc.cumulative_tsn, MAX_TSN)) {
2606 		/* there was a gap before this data was processed */
2607 		was_a_gap = 1;
2608 	}
2609 	/*
2610 	 * Set up where we got the last DATA packet from for any SACK that
2611 	 * may need to go out. Don't bump the net; this is done ONLY when a
2612 	 * chunk is assigned.
2613 	 */
2614 	asoc->last_data_chunk_from = net;
2615 
2616 	/*-
2617 	 * Now before we proceed we must figure out if this is a wasted
2618 	 * cluster... i.e. it is a small packet sent in and yet the driver
2619 	 * underneath allocated a full cluster for it. If so we must copy it
2620 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2621 	 * with cluster starvation. Note for __Panda__ we don't do this
2622 	 * since it has clusters all the way down to 64 bytes.
2623 	 */
2624 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2625 		/* we only handle mbufs that are singletons.. not chains */
2626 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2627 		if (m) {
2628 			/* ok, let's see if we can copy the data up */
2629 			caddr_t *from, *to;
2630 
2631 			/* get the pointers and copy */
2632 			to = mtod(m, caddr_t *);
2633 			from = mtod((*mm), caddr_t *);
2634 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2635 			/* copy the length and free up the old */
2636 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2637 			sctp_m_freem(*mm);
2638 			/* success, copy the new mbuf back */
2639 			*mm = m;
2640 		} else {
2641 			/* We are in trouble in the mbuf world .. yikes */
2642 			m = *mm;
2643 		}
2644 	}
2645 	/* get pointer to the first chunk header */
2646 	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2647 	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2648 	if (ch == NULL) {
2649 		return (1);
2650 	}
2651 	/*
2652 	 * process all DATA chunks...
2653 	 */
2654 	*high_tsn = asoc->cumulative_tsn;
2655 	break_flag = 0;
2656 	asoc->data_pkts_seen++;
2657 	while (stop_proc == 0) {
2658 		/* validate chunk length */
2659 		chk_length = ntohs(ch->ch.chunk_length);
2660 		if (length - *offset < chk_length) {
2661 			/* all done, mutilated chunk */
2662 			stop_proc = 1;
2663 			break;
2664 		}
2665 		if (ch->ch.chunk_type == SCTP_DATA) {
2666 			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2667 				/*
2668 				 * Need to send an abort since we had an
2669 				 * invalid data chunk.
2670 				 */
2671 				struct mbuf *op_err;
2672 
2673 				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2674 				    0, M_DONTWAIT, 1, MT_DATA);
2675 
2676 				if (op_err) {
2677 					struct sctp_paramhdr *ph;
2678 					uint32_t *ippp;
2679 
2680 					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2681 					    (2 * sizeof(uint32_t));
2682 					ph = mtod(op_err, struct sctp_paramhdr *);
2683 					ph->param_type =
2684 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2685 					ph->param_length = htons(SCTP_BUF_LEN(op_err));
2686 					ippp = (uint32_t *) (ph + 1);
2687 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2688 					ippp++;
2689 					*ippp = asoc->cumulative_tsn;
2690 
2691 				}
2692 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2693 				sctp_abort_association(inp, stcb, m, iphlen, sh,
2694 				    op_err, 0, net->port);
2695 				return (2);
2696 			}
2697 #ifdef SCTP_AUDITING_ENABLED
2698 			sctp_audit_log(0xB1, 0);
2699 #endif
2700 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2701 				last_chunk = 1;
2702 			} else {
2703 				last_chunk = 0;
2704 			}
2705 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2706 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2707 			    last_chunk)) {
2708 				num_chunks++;
2709 			}
2710 			if (abort_flag)
2711 				return (2);
2712 
2713 			if (break_flag) {
2714 				/*
2715 				 * Set because we ran out of rwnd space and
2716 				 * have no drop-report space left.
2717 				 */
2718 				stop_proc = 1;
2719 				break;
2720 			}
2721 		} else {
2722 			/* not a data chunk in the data region */
2723 			switch (ch->ch.chunk_type) {
2724 			case SCTP_INITIATION:
2725 			case SCTP_INITIATION_ACK:
2726 			case SCTP_SELECTIVE_ACK:
2727 			case SCTP_NR_SELECTIVE_ACK:	/* EY */
2728 			case SCTP_HEARTBEAT_REQUEST:
2729 			case SCTP_HEARTBEAT_ACK:
2730 			case SCTP_ABORT_ASSOCIATION:
2731 			case SCTP_SHUTDOWN:
2732 			case SCTP_SHUTDOWN_ACK:
2733 			case SCTP_OPERATION_ERROR:
2734 			case SCTP_COOKIE_ECHO:
2735 			case SCTP_COOKIE_ACK:
2736 			case SCTP_ECN_ECHO:
2737 			case SCTP_ECN_CWR:
2738 			case SCTP_SHUTDOWN_COMPLETE:
2739 			case SCTP_AUTHENTICATION:
2740 			case SCTP_ASCONF_ACK:
2741 			case SCTP_PACKET_DROPPED:
2742 			case SCTP_STREAM_RESET:
2743 			case SCTP_FORWARD_CUM_TSN:
2744 			case SCTP_ASCONF:
2745 				/*
2746 				 * Now, what do we do with KNOWN chunks that
2747 				 * are NOT in the right place?
2748 				 *
2749 				 * For now, I do nothing but ignore them. We
2750 				 * may later want to add sysctl stuff to
2751 				 * switch out and do either an ABORT() or
2752 				 * possibly process them.
2753 				 */
2754 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2755 					struct mbuf *op_err;
2756 
2757 					op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2758 					sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
2759 					return (2);
2760 				}
2761 				break;
2762 			default:
2763 				/* unknown chunk type, use bit rules */
2764 				if (ch->ch.chunk_type & 0x40) {
2765 					/* Add an error report to the queue */
2766 					struct mbuf *merr;
2767 					struct sctp_paramhdr *phd;
2768 
2769 					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2770 					if (merr) {
2771 						phd = mtod(merr, struct sctp_paramhdr *);
2772 						/*
2773 						 * We cheat and use param
2774 						 * type since we did not
2775 						 * bother to define an error
2776 						 * cause struct. They are
2777 						 * the same basic format
2778 						 * with different names.
2779 						 */
2780 						phd->param_type =
2781 						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
2782 						phd->param_length =
2783 						    htons(chk_length + sizeof(*phd));
2784 						SCTP_BUF_LEN(merr) = sizeof(*phd);
2785 						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
2786 						    SCTP_SIZE32(chk_length),
2787 						    M_DONTWAIT);
2788 						if (SCTP_BUF_NEXT(merr)) {
2789 							sctp_queue_op_err(stcb, merr);
2790 						} else {
2791 							sctp_m_freem(merr);
2792 						}
2793 					}
2794 				}
2795 				if ((ch->ch.chunk_type & 0x80) == 0) {
2796 					/* discard the rest of this packet */
2797 					stop_proc = 1;
2798 				}	/* else skip this bad chunk and
2799 					 * continue... */
2800 				break;
2801 			}	/* switch of chunk type */
2802 		}
2803 		*offset += SCTP_SIZE32(chk_length);
2804 		if ((*offset >= length) || stop_proc) {
2805 			/* no more data left in the mbuf chain */
2806 			stop_proc = 1;
2807 			continue;
2808 		}
2809 		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2810 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2811 		if (ch == NULL) {
2812 			*offset = length;
2813 			stop_proc = 1;
2814 			break;
2815 
2816 		}
2817 	}			/* while */
2818 	if (break_flag) {
2819 		/*
2820 		 * we need to report rwnd overrun drops.
2821 		 */
2822 		sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2823 	}
2824 	if (num_chunks) {
2825 		/*
2826 		 * Did we get data? If so, update the time for auto-close and
2827 		 * give the peer credit for being alive.
2828 		 */
2829 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2830 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2831 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2832 			    stcb->asoc.overall_error_count,
2833 			    0,
2834 			    SCTP_FROM_SCTP_INDATA,
2835 			    __LINE__);
2836 		}
2837 		stcb->asoc.overall_error_count = 0;
2838 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2839 	}
2840 	/* now service all of the reassembly queue if needed */
2841 	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2842 		sctp_service_queues(stcb, asoc);
2843 
2844 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2845 		/* Assure that we ack right away */
2846 		stcb->asoc.send_sack = 1;
2847 	}
2848 	/* Start a sack timer or QUEUE a SACK for sending */
2849 	sctp_sack_check(stcb, was_a_gap, &abort_flag);
2850 	if (abort_flag)
2851 		return (2);
2852 
2853 	return (0);
2854 }
2855 
2856 static int
2857 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2858     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2859     int *num_frs,
2860     uint32_t * biggest_newly_acked_tsn,
2861     uint32_t * this_sack_lowest_newack,
2862     int *ecn_seg_sums)
2863 {
2864 	struct sctp_tmit_chunk *tp1;
2865 	unsigned int theTSN;
2866 	int j, wake_him = 0, circled = 0;
2867 
2868 	/* Recover the tp1 we last saw */
2869 	tp1 = *p_tp1;
2870 	if (tp1 == NULL) {
2871 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2872 	}
2873 	for (j = frag_strt; j <= frag_end; j++) {
2874 		theTSN = j + last_tsn;
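		/*
		 * Gap ack block offsets are relative to the SACK's
		 * cum-ack, so with last_tsn = 1000 a block of [2, 4]
		 * walks theTSN through 1002, 1003 and 1004.
		 */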
2875 		while (tp1) {
2876 			if (tp1->rec.data.doing_fast_retransmit)
2877 				(*num_frs) += 1;
2878 
2879 			/*-
2880 			 * CMT: CUCv2 algorithm. For each TSN being
2881 			 * processed from the sent queue, track the
2882 			 * next expected pseudo-cumack, or
2883 			 * rtx_pseudo_cumack, if required. Separate
2884 			 * cumack trackers for first transmissions,
2885 			 * and retransmissions.
2886 			 */
2887 			if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2888 			    (tp1->snd_count == 1)) {
2889 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2890 				tp1->whoTo->find_pseudo_cumack = 0;
2891 			}
2892 			if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2893 			    (tp1->snd_count > 1)) {
2894 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2895 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2896 			}
2897 			if (tp1->rec.data.TSN_seq == theTSN) {
2898 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2899 					/*-
2900 					 * must be held until
2901 					 * cum-ack passes
2902 					 */
2903 					/*-
2904 					 * ECN Nonce: Add the nonce
2905 					 * value to the sender's
2906 					 * nonce sum
2907 					 */
2908 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2909 						/*-
2910 						 * If it is less than RESEND, it is
2911 						 * now no-longer in flight.
2912 						 * Higher values may already be set
2913 						 * via previous Gap Ack Blocks...
2914 						 * i.e. ACKED or RESEND.
2915 						 */
2916 						if (compare_with_wrap(tp1->rec.data.TSN_seq,
2917 						    *biggest_newly_acked_tsn, MAX_TSN)) {
2918 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2919 						}
2920 						/*-
2921 						 * CMT: SFR algo (and HTNA) - set
2922 						 * saw_newack to 1 for dest being
2923 						 * newly acked. update
2924 						 * this_sack_highest_newack if
2925 						 * appropriate.
2926 						 */
2927 						if (tp1->rec.data.chunk_was_revoked == 0)
2928 							tp1->whoTo->saw_newack = 1;
2929 
2930 						if (compare_with_wrap(tp1->rec.data.TSN_seq,
2931 						    tp1->whoTo->this_sack_highest_newack,
2932 						    MAX_TSN)) {
2933 							tp1->whoTo->this_sack_highest_newack =
2934 							    tp1->rec.data.TSN_seq;
2935 						}
2936 						/*-
2937 						 * CMT DAC algo: also update
2938 						 * this_sack_lowest_newack
2939 						 */
2940 						if (*this_sack_lowest_newack == 0) {
2941 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2942 								sctp_log_sack(*this_sack_lowest_newack,
2943 								    last_tsn,
2944 								    tp1->rec.data.TSN_seq,
2945 								    0,
2946 								    0,
2947 								    SCTP_LOG_TSN_ACKED);
2948 							}
2949 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2950 						}
2951 						/*-
2952 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2953 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2954 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2955 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2956 						 * Separate pseudo_cumack trackers for first transmissions and
2957 						 * retransmissions.
2958 						 */
2959 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2960 							if (tp1->rec.data.chunk_was_revoked == 0) {
2961 								tp1->whoTo->new_pseudo_cumack = 1;
2962 							}
2963 							tp1->whoTo->find_pseudo_cumack = 1;
2964 						}
2965 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2966 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2967 						}
2968 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2969 							if (tp1->rec.data.chunk_was_revoked == 0) {
2970 								tp1->whoTo->new_pseudo_cumack = 1;
2971 							}
2972 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2973 						}
2974 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2975 							sctp_log_sack(*biggest_newly_acked_tsn,
2976 							    last_tsn,
2977 							    tp1->rec.data.TSN_seq,
2978 							    frag_strt,
2979 							    frag_end,
2980 							    SCTP_LOG_TSN_ACKED);
2981 						}
2982 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2983 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2984 							    tp1->whoTo->flight_size,
2985 							    tp1->book_size,
2986 							    (uintptr_t) tp1->whoTo,
2987 							    tp1->rec.data.TSN_seq);
2988 						}
2989 						sctp_flight_size_decrease(tp1);
2990 						sctp_total_flight_decrease(stcb, tp1);
2991 
2992 						tp1->whoTo->net_ack += tp1->send_size;
2993 						if (tp1->snd_count < 2) {
2994 							/*-
2995 							 * True non-retransmitted chunk
2996 							 */
2997 							tp1->whoTo->net_ack2 += tp1->send_size;
2998 
2999 							/*-
3000 							 * update RTO too ?
3001 							 */
3002 							if (tp1->do_rtt) {
3003 								tp1->whoTo->RTO =
3004 								    sctp_calculate_rto(stcb,
3005 								    &stcb->asoc,
3006 								    tp1->whoTo,
3007 								    &tp1->sent_rcv_time,
3008 								    sctp_align_safe_nocopy);
3009 								tp1->do_rtt = 0;
3010 							}
3011 						}
3012 					}
3013 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3014 						(*ecn_seg_sums) += tp1->rec.data.ect_nonce;
3015 						(*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
3016 						if (compare_with_wrap(tp1->rec.data.TSN_seq,
3017 						    stcb->asoc.this_sack_highest_gap,
3018 						    MAX_TSN)) {
3019 							stcb->asoc.this_sack_highest_gap =
3020 							    tp1->rec.data.TSN_seq;
3021 						}
3022 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3023 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3024 #ifdef SCTP_AUDITING_ENABLED
3025 							sctp_audit_log(0xB2,
3026 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3027 #endif
3028 						}
3029 					}
3030 					/*-
3031 					 * All chunks NOT UNSENT fall through here and are marked
3032 					 * (leave PR-SCTP ones that are to skip alone though)
3033 					 */
3034 					if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
3035 						tp1->sent = SCTP_DATAGRAM_MARKED;
3036 
3037 					if (tp1->rec.data.chunk_was_revoked) {
3038 						/* deflate the cwnd */
3039 						tp1->whoTo->cwnd -= tp1->book_size;
3040 						tp1->rec.data.chunk_was_revoked = 0;
3041 					}
3042 					/* NR Sack code here */
3043 					if (nr_sacking) {
3044 						if (tp1->data) {
3045 							/*
3046 							 * sa_ignore
3047 							 * NO_NULL_CHK
3048 							 */
3049 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3050 							sctp_m_freem(tp1->data);
3051 							tp1->data = NULL;
3052 						}
3053 						wake_him++;
3054 					}
3055 				}
3056 				break;
3057 			}	/* if (tp1->TSN_seq == theTSN) */
3058 			if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
3059 			    MAX_TSN))
3060 				break;
3061 
3062 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3063 			if ((tp1 == NULL) && (circled == 0)) {
3064 				circled++;
3065 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3066 			}
3067 		}		/* end while (tp1) */
3068 		if (tp1 == NULL) {
3069 			circled = 0;
3070 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3071 		}
3072 		/* In case the fragments were not in order we must reset */
3073 	}			/* end for (j = fragStart */
3074 	*p_tp1 = tp1;
3075 	return (wake_him);	/* Return value only used for nr-sack */
3076 }
3077 
3078 
3079 static int
3080 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3081     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3082     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3083     int num_seg, int num_nr_seg, int *ecn_seg_sums)
3084 {
3085 	struct sctp_gap_ack_block *frag, block;
3086 	struct sctp_tmit_chunk *tp1;
3087 	int i;
3088 	int num_frs = 0;
3089 	int chunk_freed;
3090 	int non_revocable;
3091 	uint16_t frag_strt, frag_end;
3092 	uint32_t last_frag_high;
3093 
3094 	tp1 = NULL;
3095 	last_frag_high = 0;
3096 	chunk_freed = 0;
3097 
3098 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3099 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3100 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3101 		*offset += sizeof(block);
3102 		if (frag == NULL) {
3103 			return (chunk_freed);
3104 		}
3105 		frag_strt = ntohs(frag->start);
3106 		frag_end = ntohs(frag->end);
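		/*
		 * Both edges are 16-bit offsets from last_tsn, so one block
		 * can never ack more than 65535 TSNs beyond the cum-ack; a
		 * block with frag_strt > frag_end is malformed and is
		 * skipped just below.
		 */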
3107 		/* some sanity checks on the fragment offsets */
3108 		if (frag_strt > frag_end) {
3109 			/* this one is malformed, skip */
3110 			continue;
3111 		}
3112 		if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
3113 		    MAX_TSN))
3114 			*biggest_tsn_acked = frag_end + last_tsn;
3115 
3116 		/* mark acked dgs and find out the highestTSN being acked */
3117 		if (tp1 == NULL) {
3118 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3119 			/* save the locations of the last frags */
3120 			last_frag_high = frag_end + last_tsn;
3121 		} else {
3122 			/*
3123 			 * now let's see if we need to reset the queue due to
3124 			 * an out-of-order SACK fragment
3125 			 */
3126 			if (compare_with_wrap(frag_strt + last_tsn,
3127 			    last_frag_high, MAX_TSN)) {
3128 				/*
3129 				 * if the new frag starts after the last TSN
3130 				 * frag covered, we are ok and this one is
3131 				 * beyond the last one
3132 				 */
3133 				;
3134 			} else {
3135 				/*
3136 				/*
3137 				 * ok, they have reset us, so we need to
3138 				 * reset the queue; this will cause extra
3139 				 * hunting, but hey, they chose the
3140 				 * performance hit when they failed to order
3141 				 * their gaps
3142 				 */
3143 			}
3144 			last_frag_high = frag_end + last_tsn;
3145 		}
3146 		if (i < num_seg) {
3147 			non_revocable = 0;
3148 		} else {
3149 			non_revocable = 1;
3150 		}
3151 		if (i == num_seg) {
3152 			tp1 = NULL;
3153 		}
3154 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3155 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3156 		    this_sack_lowest_newack, ecn_seg_sums)) {
3157 			chunk_freed = 1;
3158 		}
3159 	}
3160 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3161 		if (num_frs)
3162 			sctp_log_fr(*biggest_tsn_acked,
3163 			    *biggest_newly_acked_tsn,
3164 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3165 	}
3166 	return (chunk_freed);
3167 }
3168 
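/*
 * Editor's sketch (illustrative, not built): a gap-ack block carries
 * 16-bit start/end offsets relative to the SACK's cumulative TSN ack,
 * so a block (start, end) acknowledges the absolute TSNs
 * [last_tsn + start, last_tsn + end].  The helper below is a minimal
 * restatement of that mapping; the name is hypothetical.
 */
#if 0
static void
example_gap_block_range(uint32_t last_tsn, uint16_t start, uint16_t end,
    uint32_t *lo, uint32_t *hi)
{
	/* Malformed blocks (start > end) are skipped by the caller. */
	*lo = last_tsn + start;	/* first TSN the block acknowledges */
	*hi = last_tsn + end;	/* last TSN the block acknowledges */
}
#endif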
3169 static void
3170 sctp_check_for_revoked(struct sctp_tcb *stcb,
3171     struct sctp_association *asoc, uint32_t cumack,
3172     uint32_t biggest_tsn_acked)
3173 {
3174 	struct sctp_tmit_chunk *tp1;
3175 	int tot_revoked = 0;
3176 
3177 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3178 	while (tp1) {
3179 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
3180 		    MAX_TSN)) {
3181 			/*
3182 			 * ok, this guy is either ACKED or MARKED. If it is
3183 			 * ACKED, it has been previously acked but not this
3184 			 * time, i.e. revoked. If it is MARKED, it was ACK'ed
3185 			 * again.
3186 			 */
3187 			if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3188 			    MAX_TSN))
3189 				break;
3190 
3191 
3192 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3193 				/* it has been revoked */
3194 				tp1->sent = SCTP_DATAGRAM_SENT;
3195 				tp1->rec.data.chunk_was_revoked = 1;
3196 				/*
3197 				 * We must add this stuff back in to assure
3198 				 * timers and such get started.
3199 				 */
3200 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3201 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3202 					    tp1->whoTo->flight_size,
3203 					    tp1->book_size,
3204 					    (uintptr_t) tp1->whoTo,
3205 					    tp1->rec.data.TSN_seq);
3206 				}
3207 				sctp_flight_size_increase(tp1);
3208 				sctp_total_flight_increase(stcb, tp1);
3209 				/*
3210 				 * We inflate the cwnd to compensate for our
3211 				 * artificial inflation of the flight_size.
3212 				 */
3213 				tp1->whoTo->cwnd += tp1->book_size;
3214 				tot_revoked++;
3215 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3216 					sctp_log_sack(asoc->last_acked_seq,
3217 					    cumack,
3218 					    tp1->rec.data.TSN_seq,
3219 					    0,
3220 					    0,
3221 					    SCTP_LOG_TSN_REVOKED);
3222 				}
3223 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3224 				/* it has been re-acked in this SACK */
3225 				tp1->sent = SCTP_DATAGRAM_ACKED;
3226 			}
3227 		}
3228 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3229 			break;
3230 		tp1 = TAILQ_NEXT(tp1, sctp_next);
3231 	}
3232 	if (tot_revoked > 0) {
3233 		/*
3234 		 * Setup the ecn nonce re-sync point. We do this since once
3235 		 * data is revoked we begin to retransmit things, which do
3236 		 * NOT have the ECN bits set. This means we are now out of
3237 		 * sync and must wait until we get back in sync with the
3238 		 * peer to check ECN bits.
3239 		 */
3240 		tp1 = TAILQ_FIRST(&asoc->send_queue);
3241 		if (tp1 == NULL) {
3242 			asoc->nonce_resync_tsn = asoc->sending_seq;
3243 		} else {
3244 			asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
3245 		}
3246 		asoc->nonce_wait_for_ecne = 0;
3247 		asoc->nonce_sum_check = 0;
3248 	}
3249 }
3250 
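/*
 * Worked example for the revocation pass above (editor's addition):
 * suppose SACK #1 carries cum-ack 10 with a gap block covering TSNs
 * 12-13, so 12 and 13 become SCTP_DATAGRAM_ACKED.  If SACK #2 then
 * carries cum-ack 11 and a gap block covering only TSN 14, the gap
 * processing re-marks 14 but leaves 12 and 13 as ACKED; the walk
 * above finds them still ACKED at or below biggest_tsn_acked (14)
 * and treats them as revoked, returning them to SCTP_DATAGRAM_SENT
 * and adding them back into the flight size.
 */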
3251 
3252 static void
3253 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3254     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3255 {
3256 	struct sctp_tmit_chunk *tp1;
3257 	int strike_flag = 0;
3258 	struct timeval now;
3259 	int tot_retrans = 0;
3260 	uint32_t sending_seq;
3261 	struct sctp_nets *net;
3262 	int num_dests_sacked = 0;
3263 
3264 	/*
3265 	 * select the sending_seq, this is either the next thing ready to be
3266 	 * sent but not yet transmitted, OR the next seq we will assign.
3267 	 */
3268 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3269 	if (tp1 == NULL) {
3270 		sending_seq = asoc->sending_seq;
3271 	} else {
3272 		sending_seq = tp1->rec.data.TSN_seq;
3273 	}
3274 
3275 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3276 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3277 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3278 			if (net->saw_newack)
3279 				num_dests_sacked++;
3280 		}
3281 	}
3282 	if (stcb->asoc.peer_supports_prsctp) {
3283 		(void)SCTP_GETTIME_TIMEVAL(&now);
3284 	}
3285 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3286 	while (tp1) {
3287 		strike_flag = 0;
3288 		if (tp1->no_fr_allowed) {
3289 			/* this one had a timeout or something */
3290 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3291 			continue;
3292 		}
3293 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3294 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3295 				sctp_log_fr(biggest_tsn_newly_acked,
3296 				    tp1->rec.data.TSN_seq,
3297 				    tp1->sent,
3298 				    SCTP_FR_LOG_CHECK_STRIKE);
3299 		}
3300 		if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3301 		    MAX_TSN) ||
3302 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3303 			/* done */
3304 			break;
3305 		}
3306 		if (stcb->asoc.peer_supports_prsctp) {
3307 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3308 				/* Is it expired? */
3309 				if (
3310 				/*
3311 				 * TODO sctp_constants.h needs alternative
3312 				 * time macros when _KERNEL is undefined.
3313 				 */
3314 				    (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
3315 				    ) {
3316 					/* Yes so drop it */
3317 					if (tp1->data != NULL) {
3318 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3319 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3320 						    SCTP_SO_NOT_LOCKED);
3321 					}
3322 					tp1 = TAILQ_NEXT(tp1, sctp_next);
3323 					continue;
3324 				}
3325 			}
3326 		}
3327 		if (compare_with_wrap(tp1->rec.data.TSN_seq,
3328 		    asoc->this_sack_highest_gap, MAX_TSN)) {
3329 			/* we are beyond the tsn in the sack  */
3330 			break;
3331 		}
3332 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3333 			/* either a RESEND, ACKED, or MARKED */
3334 			/* skip */
3335 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3336 			continue;
3337 		}
3338 		/*
3339 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3340 		 */
3341 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3342 			/*
3343 			 * No new acks were received for data sent to this
3344 			 * dest. Therefore, according to the SFR algo for
3345 			 * CMT, no data sent to this dest can be marked for
3346 			 * FR using this SACK.
3347 			 */
3348 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3349 			continue;
3350 		} else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq,
3351 		    tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
3352 			/*
3353 			 * CMT: New acks were received for data sent to
3354 			 * this dest. But no new acks were seen for data
3355 			 * sent after tp1. Therefore, according to the SFR
3356 			 * algo for CMT, tp1 cannot be marked for FR using
3357 			 * this SACK. This step covers part of the DAC algo
3358 			 * and the HTNA algo as well.
3359 			 */
3360 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3361 			continue;
3362 		}
3363 		/*
3364 		 * Here we check to see if we have already done a FR
3365 		 * and if so we see if the biggest TSN we saw in the sack is
3366 		 * smaller than the recovery point. If so we don't strike
3367 		 * the tsn... otherwise we CAN strike the TSN.
3368 		 */
3369 		/*
3370 		 * @@@ JRI: Check for CMT if (accum_moved &&
3371 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3372 		 * 0)) {
3373 		 */
3374 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3375 			/*
3376 			 * Strike the TSN if in fast-recovery and cum-ack
3377 			 * moved.
3378 			 */
3379 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3380 				sctp_log_fr(biggest_tsn_newly_acked,
3381 				    tp1->rec.data.TSN_seq,
3382 				    tp1->sent,
3383 				    SCTP_FR_LOG_STRIKE_CHUNK);
3384 			}
3385 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3386 				tp1->sent++;
3387 			}
3388 			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3389 				/*
3390 				 * CMT DAC algorithm: If SACK flag is set to
3391 				 * 0, then lowest_newack test will not pass
3392 				 * because it would have been set to the
3393 				 * cumack earlier. If it is not already marked
3394 				 * for rtx, the SACK is not mixed, and tp1 is
3395 				 * not between two sacked TSNs, then mark it
3396 				 * once more. NOTE that we are marking by one
3397 				 * additional time since the SACK DAC flag
3398 				 * indicates that two packets have been
3399 				 * received after this missing TSN.
3400 				 */
3401 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3402 				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3403 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3404 						sctp_log_fr(16 + num_dests_sacked,
3405 						    tp1->rec.data.TSN_seq,
3406 						    tp1->sent,
3407 						    SCTP_FR_LOG_STRIKE_CHUNK);
3408 					}
3409 					tp1->sent++;
3410 				}
3411 			}
3412 		} else if ((tp1->rec.data.doing_fast_retransmit) && (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
3413 			/*
3414 			 * For those that have done a FR we must take
3415 			 * special consideration if we strike. I.e the
3416 			 * biggest_newly_acked must be higher than the
3417 			 * sending_seq at the time we did the FR.
3418 			 */
3419 			if (
3420 #ifdef SCTP_FR_TO_ALTERNATE
3421 			/*
3422 			 * If FR's go to new networks, then we must only do
3423 			 * this for singly homed asoc's. However if the FR's
3424 			 * go to the same network (Armando's work) then it's
3425 			 * ok to FR multiple times.
3426 			 */
3427 			    (asoc->numnets < 2)
3428 #else
3429 			    (1)
3430 #endif
3431 			    ) {
3432 
3433 				if ((compare_with_wrap(biggest_tsn_newly_acked,
3434 				    tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3435 				    (biggest_tsn_newly_acked ==
3436 				    tp1->rec.data.fast_retran_tsn)) {
3437 					/*
3438 					 * Strike the TSN, since this ack is
3439 					 * beyond where things were when we
3440 					 * did a FR.
3441 					 */
3442 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3443 						sctp_log_fr(biggest_tsn_newly_acked,
3444 						    tp1->rec.data.TSN_seq,
3445 						    tp1->sent,
3446 						    SCTP_FR_LOG_STRIKE_CHUNK);
3447 					}
3448 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3449 						tp1->sent++;
3450 					}
3451 					strike_flag = 1;
3452 					if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3453 						/*
3454 						 * CMT DAC algorithm: If
3455 						 * SACK flag is set to 0,
3456 						 * then lowest_newack test
3457 						 * will not pass because it
3458 						 * would have been set to
3459 						 * the cumack earlier. If it
3460 						 * is not already marked for
3461 						 * rtx, the SACK is not
3462 						 * mixed, and tp1 is not
3463 						 * between two sacked TSNs,
3464 						 * then mark it once more.
3465 						 * NOTE that we
3465 						 * are marking by one
3466 						 * additional time since the
3467 						 * SACK DAC flag indicates
3468 						 * that two packets have
3469 						 * been received after this
3470 						 * missing TSN.
3471 						 */
3472 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3473 						    (num_dests_sacked == 1) &&
3474 						    compare_with_wrap(this_sack_lowest_newack,
3475 						    tp1->rec.data.TSN_seq, MAX_TSN)) {
3476 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3477 								sctp_log_fr(32 + num_dests_sacked,
3478 								    tp1->rec.data.TSN_seq,
3479 								    tp1->sent,
3480 								    SCTP_FR_LOG_STRIKE_CHUNK);
3481 							}
3482 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3483 								tp1->sent++;
3484 							}
3485 						}
3486 					}
3487 				}
3488 			}
3489 			/*
3490 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3491 			 * algo covers HTNA.
3492 			 */
3493 		} else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3494 		    biggest_tsn_newly_acked, MAX_TSN)) {
3495 			/*
3496 			 * We don't strike these: this is the HTNA
3497 			 * algorithm, i.e. we don't strike if our TSN is
3498 			 * larger than the Highest TSN Newly Acked.
3499 			 */
3500 			;
3501 		} else {
3502 			/* Strike the TSN */
3503 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3504 				sctp_log_fr(biggest_tsn_newly_acked,
3505 				    tp1->rec.data.TSN_seq,
3506 				    tp1->sent,
3507 				    SCTP_FR_LOG_STRIKE_CHUNK);
3508 			}
3509 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3510 				tp1->sent++;
3511 			}
3512 			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3513 				/*
3514 				 * CMT DAC algorithm: If SACK flag is set to
3515 				 * 0, then lowest_newack test will not pass
3516 				 * because it would have been set to the
3517 				 * cumack earlier. If it is not already marked
3518 				 * for rtx, the SACK is not mixed, and tp1 is
3519 				 * not between two sacked TSNs, then mark it
3520 				 * once more. NOTE that we are marking by one
3521 				 * additional time since the SACK DAC flag
3522 				 * indicates that two packets have been
3523 				 * received after this missing TSN.
3524 				 */
3525 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3526 				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3527 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3528 						sctp_log_fr(48 + num_dests_sacked,
3529 						    tp1->rec.data.TSN_seq,
3530 						    tp1->sent,
3531 						    SCTP_FR_LOG_STRIKE_CHUNK);
3532 					}
3533 					tp1->sent++;
3534 				}
3535 			}
3536 		}
3537 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3538 			struct sctp_nets *alt;
3539 
3540 			/* fix counts and things */
3541 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3542 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3543 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3544 				    tp1->book_size,
3545 				    (uintptr_t) tp1->whoTo,
3546 				    tp1->rec.data.TSN_seq);
3547 			}
3548 			if (tp1->whoTo) {
3549 				tp1->whoTo->net_ack++;
3550 				sctp_flight_size_decrease(tp1);
3551 			}
3552 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3553 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3554 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3555 			}
3556 			/* add back to the rwnd */
3557 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3558 
3559 			/* remove from the total flight */
3560 			sctp_total_flight_decrease(stcb, tp1);
3561 
3562 			if ((stcb->asoc.peer_supports_prsctp) &&
3563 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3564 				/*
3565 				 * Has it been retransmitted tv_sec times? -
3566 				 * we store the retran count there.
3567 				 */
3568 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3569 					/* Yes, so drop it */
3570 					if (tp1->data != NULL) {
3571 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3572 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3573 						    SCTP_SO_NOT_LOCKED);
3574 					}
3575 					/* Make sure to flag we had a FR */
3576 					tp1->whoTo->net_ack++;
3577 					tp1 = TAILQ_NEXT(tp1, sctp_next);
3578 					continue;
3579 				}
3580 			}
3581 			/* printf("OK, we are now ready to FR this guy\n"); */
3582 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3583 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3584 				    0, SCTP_FR_MARKED);
3585 			}
3586 			if (strike_flag) {
3587 				/* This is a subsequent FR */
3588 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3589 			}
3590 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3591 			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
3592 				/*
3593 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3594 				 * If CMT is being used, then pick dest with
3595 				 * largest ssthresh for any retransmission.
3596 				 */
3597 				tp1->no_fr_allowed = 1;
3598 				alt = tp1->whoTo;
3599 				/* sa_ignore NO_NULL_CHK */
3600 				if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
3601 					/*
3602 					 * JRS 5/18/07 - If CMT PF is on,
3603 					 * use the PF version of
3604 					 * find_alt_net()
3605 					 */
3606 					alt = sctp_find_alternate_net(stcb, alt, 2);
3607 				} else {
3608 					/*
3609 					 * JRS 5/18/07 - If only CMT is on,
3610 					 * use the CMT version of
3611 					 * find_alt_net()
3612 					 */
3613 					/* sa_ignore NO_NULL_CHK */
3614 					alt = sctp_find_alternate_net(stcb, alt, 1);
3615 				}
3616 				if (alt == NULL) {
3617 					alt = tp1->whoTo;
3618 				}
3619 				/*
3620 				 * CUCv2: If a different dest is picked for
3621 				 * the retransmission, then new
3622 				 * (rtx-)pseudo_cumack needs to be tracked
3623 				 * for orig dest. Let CUCv2 track new (rtx-)
3624 				 * pseudo-cumack always.
3625 				 */
3626 				if (tp1->whoTo) {
3627 					tp1->whoTo->find_pseudo_cumack = 1;
3628 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3629 				}
3630 			} else {/* CMT is OFF */
3631 
3632 #ifdef SCTP_FR_TO_ALTERNATE
3633 				/* Can we find an alternate? */
3634 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3635 #else
3636 				/*
3637 				 * default behavior is to NOT retransmit
3638 				 * FR's to an alternate. Armando Caro's
3639 				 * paper details why.
3640 				 */
3641 				alt = tp1->whoTo;
3642 #endif
3643 			}
3644 
3645 			tp1->rec.data.doing_fast_retransmit = 1;
3646 			tot_retrans++;
3647 			/* mark the sending seq for possible subsequent FR's */
3648 			/*
3649 			 * printf("Marking TSN for FR new value %x\n",
3650 			 * (uint32_t)tpi->rec.data.TSN_seq);
3651 			 */
3652 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3653 				/*
3654 				 * If the send queue is empty then this is
3655 				 * the next sequence number that will be
3656 				 * assigned, so we subtract one from it to
3657 				 * get the one we last sent.
3658 				 */
3659 				tp1->rec.data.fast_retran_tsn = sending_seq;
3660 			} else {
3661 				/*
3662 				 * If there are chunks on the send queue
3663 				 * (unsent data that has made it from the
3664 				 * stream queues but not out the door), we
3665 				 * take the first one (which will have the
3666 				 * lowest TSN) and subtract one to get the
3667 				 * one we last sent.
3668 				 */
3669 				struct sctp_tmit_chunk *ttt;
3670 
3671 				ttt = TAILQ_FIRST(&asoc->send_queue);
3672 				tp1->rec.data.fast_retran_tsn =
3673 				    ttt->rec.data.TSN_seq;
3674 			}
3675 
3676 			if (tp1->do_rtt) {
3677 				/*
3678 				 * this guy had a RTO calculation pending on
3679 				 * it, cancel it
3680 				 */
3681 				tp1->do_rtt = 0;
3682 			}
3683 			if (alt != tp1->whoTo) {
3684 				/* yes, there is an alternate. */
3685 				sctp_free_remote_addr(tp1->whoTo);
3686 				/* sa_ignore FREED_MEMORY */
3687 				tp1->whoTo = alt;
3688 				atomic_add_int(&alt->ref_count, 1);
3689 			}
3690 		}
3691 		tp1 = TAILQ_NEXT(tp1, sctp_next);
3692 	}			/* while (tp1) */
3693 
3694 	if (tot_retrans > 0) {
3695 		/*
3696 		 * Setup the ecn nonce re-sync point. We do this since once
3697 		 * we go to FR something we introduce a Karn's rule scenario
3698 		 * and won't know the totals for the ECN bits.
3699 		 */
3700 		asoc->nonce_resync_tsn = sending_seq;
3701 		asoc->nonce_wait_for_ecne = 0;
3702 		asoc->nonce_sum_check = 0;
3703 	}
3704 }
3705 
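/*
 * Editor's sketch (illustrative, not built): while tp1->sent is below
 * SCTP_DATAGRAM_RESEND it doubles as the miss counter, so "striking" a
 * TSN is a plain increment, and fast retransmit triggers once the
 * counter climbs from SCTP_DATAGRAM_SENT up to SCTP_DATAGRAM_RESEND
 * (three strikes with the stock constants).  The CMT DAC rule above
 * can add one extra strike per SACK.
 */
#if 0
static void
example_strike(struct sctp_tmit_chunk *chk, int dac_extra_strike)
{
	if (chk->sent < SCTP_DATAGRAM_RESEND)
		chk->sent++;	/* normal strike from this SACK */
	if (dac_extra_strike && (chk->sent < SCTP_DATAGRAM_RESEND))
		chk->sent++;	/* additional CMT DAC strike */
	if (chk->sent == SCTP_DATAGRAM_RESEND) {
		/* now queued for fast retransmission */
	}
}
#endif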
3706 struct sctp_tmit_chunk *
3707 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3708     struct sctp_association *asoc)
3709 {
3710 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3711 	struct timeval now;
3712 	int now_filled = 0;
3713 
3714 	if (asoc->peer_supports_prsctp == 0) {
3715 		return (NULL);
3716 	}
3717 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3718 	while (tp1) {
3719 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3720 		    tp1->sent != SCTP_DATAGRAM_ACKED &&
3721 		    tp1->sent != SCTP_DATAGRAM_RESEND) {
3722 			/* no chance to advance, out of here */
3723 			break;
3724 		}
3725 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3726 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3727 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3728 				    asoc->advanced_peer_ack_point,
3729 				    tp1->rec.data.TSN_seq, 0, 0);
3730 			}
3731 		}
3732 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3733 			/*
3734 			 * We can't fwd-tsn past any that are reliable, i.e.,
3735 			 * retransmitted until the asoc fails.
3736 			 */
3737 			break;
3738 		}
3739 		if (!now_filled) {
3740 			(void)SCTP_GETTIME_TIMEVAL(&now);
3741 			now_filled = 1;
3742 		}
3743 		tp2 = TAILQ_NEXT(tp1, sctp_next);
3744 		/*
3745 		 * now we have a chunk which is marked for another
3746 		 * retransmission to a PR-stream but has run out of its
3747 		 * chances already, OR has been marked to skip now. Can we
3748 		 * skip it if it's a resend?
3749 		 */
3750 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3751 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3752 			/*
3753 			 * Now is this one marked for resend and its time is
3754 			 * now up?
3755 			 */
3756 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3757 				/* Yes so drop it */
3758 				if (tp1->data) {
3759 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3760 					    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3761 					    SCTP_SO_NOT_LOCKED);
3762 				}
3763 			} else {
3764 				/*
3765 				 * No, we are done when hit one for resend
3766 				 * No, we are done when we hit one for resend
3767 				 * whose time has not expired.
3768 				break;
3769 			}
3770 		}
3771 		/*
3772 		 * Ok now if this chunk is marked to drop it we can clean up
3773 		 * the chunk, advance our peer ack point and we can check
3774 		 * the next chunk.
3775 		 */
3776 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3777 		    (tp1->sent == SCTP_DATAGRAM_ACKED)) {
3778 			/* the advanced PeerAckPoint goes forward */
3779 			if (compare_with_wrap(tp1->rec.data.TSN_seq,
3780 			    asoc->advanced_peer_ack_point,
3781 			    MAX_TSN)) {
3782 
3783 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3784 				a_adv = tp1;
3785 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3786 				/* No update but we do save the chk */
3787 				a_adv = tp1;
3788 			}
3789 		} else {
3790 			/*
3791 			 * If it is still in RESEND we can advance no
3792 			 * further
3793 			 */
3794 			break;
3795 		}
3796 		/*
3797 		 * If we hit here we just dumped tp1, move to next tsn on
3798 		 * sent queue.
3799 		 */
3800 		tp1 = tp2;
3801 	}
3802 	return (a_adv);
3803 }
3804 
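/*
 * Worked example for the advance above (editor's addition): with a
 * sent queue of TSN 5 (PR-SCTP, SCTP_FORWARD_TSN_SKIP), TSN 6
 * (PR-SCTP, SCTP_DATAGRAM_ACKED) and TSN 7 (reliable,
 * SCTP_DATAGRAM_RESEND), the walk advances advancedPeerAckPoint to 6
 * and stops at 7, returning the chunk for TSN 6.  A FORWARD-TSN
 * carrying 6 then lets the peer deliver past the abandoned data.
 */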
3805 static int
3806 sctp_fs_audit(struct sctp_association *asoc)
3807 {
3808 	struct sctp_tmit_chunk *chk;
3809 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3810 	int entry_flight, entry_cnt, ret;
3811 
3812 	entry_flight = asoc->total_flight;
3813 	entry_cnt = asoc->total_flight_count;
3814 	ret = 0;
3815 
3816 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3817 		return (0);
3818 
3819 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3820 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3821 			printf("Chk TSN:%u size:%d inflight cnt:%d\n",
3822 			    chk->rec.data.TSN_seq,
3823 			    chk->send_size,
3824 			    chk->snd_count
3825 			    );
3826 			inflight++;
3827 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3828 			resend++;
3829 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3830 			inbetween++;
3831 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3832 			above++;
3833 		} else {
3834 			acked++;
3835 		}
3836 	}
3837 
3838 	if ((inflight > 0) || (inbetween > 0)) {
3839 #ifdef INVARIANTS
3840 		panic("Flight size-express incorrect? \n");
3841 #else
3842 		printf("asoc->total_flight:%d cnt:%d\n",
3843 		    entry_flight, entry_cnt);
3844 
3845 		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3846 		    inflight, inbetween, resend, above, acked);
3847 		ret = 1;
3848 #endif
3849 	}
3850 	return (ret);
3851 }
3852 
3853 
3854 static void
3855 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3856     struct sctp_association *asoc,
3857     struct sctp_nets *net,
3858     struct sctp_tmit_chunk *tp1)
3859 {
3860 	tp1->window_probe = 0;
3861 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3862 		/* TSN's skipped; we do NOT move back. */
3863 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3864 		    tp1->whoTo->flight_size,
3865 		    tp1->book_size,
3866 		    (uintptr_t) tp1->whoTo,
3867 		    tp1->rec.data.TSN_seq);
3868 		return;
3869 	}
3870 	/* First setup this by shrinking flight */
3871 	sctp_flight_size_decrease(tp1);
3872 	sctp_total_flight_decrease(stcb, tp1);
3873 	/* Now mark for resend */
3874 	tp1->sent = SCTP_DATAGRAM_RESEND;
3875 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3876 
3877 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3878 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3879 		    tp1->whoTo->flight_size,
3880 		    tp1->book_size,
3881 		    (uintptr_t) tp1->whoTo,
3882 		    tp1->rec.data.TSN_seq);
3883 	}
3884 }
3885 
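/*
 * Editor's note (illustrative): a window probe is a single chunk kept
 * in flight while peers_rwnd is zero.  When a later SACK reopens the
 * window, the callers below locate the un-acked probe chunk and use
 * the routine above to pull it out of the flight accounting and
 * re-mark it SCTP_DATAGRAM_RESEND, so it goes out again through the
 * normal retransmission path.
 */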
3886 void
3887 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3888     uint32_t rwnd, int nonce_sum_flag, int *abort_now)
3889 {
3890 	struct sctp_nets *net;
3891 	struct sctp_association *asoc;
3892 	struct sctp_tmit_chunk *tp1, *tp2;
3893 	uint32_t old_rwnd;
3894 	int win_probe_recovery = 0;
3895 	int win_probe_recovered = 0;
3896 	int j, done_once = 0;
3897 
3898 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3899 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3900 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3901 	}
3902 	SCTP_TCB_LOCK_ASSERT(stcb);
3903 #ifdef SCTP_ASOCLOG_OF_TSNS
3904 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3905 	stcb->asoc.cumack_log_at++;
3906 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3907 		stcb->asoc.cumack_log_at = 0;
3908 	}
3909 #endif
3910 	asoc = &stcb->asoc;
3911 	old_rwnd = asoc->peers_rwnd;
3912 	if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
3913 		/* old ack */
3914 		return;
3915 	} else if (asoc->last_acked_seq == cumack) {
3916 		/* Window update sack */
3917 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3918 		    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3919 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3920 			/* SWS sender side engages */
3921 			asoc->peers_rwnd = 0;
3922 		}
3923 		if (asoc->peers_rwnd > old_rwnd) {
3924 			goto again;
3925 		}
3926 		return;
3927 	}
3928 	/* First setup for CC stuff */
3929 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3930 		net->prev_cwnd = net->cwnd;
3931 		net->net_ack = 0;
3932 		net->net_ack2 = 0;
3933 
3934 		/*
3935 		 * CMT: Reset CUC and Fast recovery algo variables before
3936 		 * SACK processing
3937 		 */
3938 		net->new_pseudo_cumack = 0;
3939 		net->will_exit_fast_recovery = 0;
3940 	}
3941 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3942 		uint32_t send_s;
3943 
3944 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3945 			tp1 = TAILQ_LAST(&asoc->sent_queue,
3946 			    sctpchunk_listhead);
3947 			send_s = tp1->rec.data.TSN_seq + 1;
3948 		} else {
3949 			send_s = asoc->sending_seq;
3950 		}
3951 		if ((cumack == send_s) ||
3952 		    compare_with_wrap(cumack, send_s, MAX_TSN)) {
3953 #ifndef INVARIANTS
3954 			struct mbuf *oper;
3955 
3956 #endif
3957 #ifdef INVARIANTS
3958 			panic("Impossible sack 1");
3959 #else
3960 
3961 			*abort_now = 1;
3962 			/* XXX */
3963 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3964 			    0, M_DONTWAIT, 1, MT_DATA);
3965 			if (oper) {
3966 				struct sctp_paramhdr *ph;
3967 				uint32_t *ippp;
3968 
3969 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3970 				    sizeof(uint32_t);
3971 				ph = mtod(oper, struct sctp_paramhdr *);
3972 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3973 				ph->param_length = htons(SCTP_BUF_LEN(oper));
3974 				ippp = (uint32_t *) (ph + 1);
3975 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3976 			}
3977 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3978 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
3979 			return;
3980 #endif
3981 		}
3982 	}
3983 	asoc->this_sack_highest_gap = cumack;
3984 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3985 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3986 		    stcb->asoc.overall_error_count,
3987 		    0,
3988 		    SCTP_FROM_SCTP_INDATA,
3989 		    __LINE__);
3990 	}
3991 	stcb->asoc.overall_error_count = 0;
3992 	if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
3993 		/* process the new consecutive TSN first */
3994 		tp1 = TAILQ_FIRST(&asoc->sent_queue);
3995 		while (tp1) {
3996 			tp2 = TAILQ_NEXT(tp1, sctp_next);
3997 			if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
3998 			    MAX_TSN) ||
3999 			    cumack == tp1->rec.data.TSN_seq) {
4000 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4001 					printf("Warning, an unsent is now acked?\n");
4002 				}
4003 				/*
4004 				 * ECN Nonce: Add the nonce to the sender's
4005 				 * nonce sum
4006 				 */
4007 				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4008 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4009 					/*
4010 					 * If it is less than ACKED, it is
4011 					 * now no-longer in flight. Higher
4012 					 * values may occur during marking
4013 					 */
4014 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4015 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4016 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4017 							    tp1->whoTo->flight_size,
4018 							    tp1->book_size,
4019 							    (uintptr_t) tp1->whoTo,
4020 							    tp1->rec.data.TSN_seq);
4021 						}
4022 						sctp_flight_size_decrease(tp1);
4023 						/* sa_ignore NO_NULL_CHK */
4024 						sctp_total_flight_decrease(stcb, tp1);
4025 					}
4026 					tp1->whoTo->net_ack += tp1->send_size;
4027 					if (tp1->snd_count < 2) {
4028 						/*
4029 						 * True non-retransmitted
4030 						 * chunk
4031 						 */
4032 						tp1->whoTo->net_ack2 +=
4033 						    tp1->send_size;
4034 
4035 						/* update RTO too? */
4036 						if (tp1->do_rtt) {
4037 							tp1->whoTo->RTO =
4038 							/*
4039 							 * sa_ignore
4040 							 * NO_NULL_CHK
4041 							 */
4042 							    sctp_calculate_rto(stcb,
4043 							    asoc, tp1->whoTo,
4044 							    &tp1->sent_rcv_time,
4045 							    sctp_align_safe_nocopy);
4046 							tp1->do_rtt = 0;
4047 						}
4048 					}
4049 					/*
4050 					 * CMT: CUCv2 algorithm. From the
4051 					 * cumack'd TSNs, for each TSN being
4052 					 * acked for the first time, set the
4053 					 * following variables for the
4054 					 * corresp destination.
4055 					 * new_pseudo_cumack will trigger a
4056 					 * cwnd update.
4057 					 * find_(rtx_)pseudo_cumack will
4058 					 * trigger search for the next
4059 					 * expected (rtx-)pseudo-cumack.
4060 					 */
4061 					tp1->whoTo->new_pseudo_cumack = 1;
4062 					tp1->whoTo->find_pseudo_cumack = 1;
4063 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4064 
4065 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4066 						/* sa_ignore NO_NULL_CHK */
4067 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4068 					}
4069 				}
4070 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4071 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4072 				}
4073 				if (tp1->rec.data.chunk_was_revoked) {
4074 					/* deflate the cwnd */
4075 					tp1->whoTo->cwnd -= tp1->book_size;
4076 					tp1->rec.data.chunk_was_revoked = 0;
4077 				}
4078 				tp1->sent = SCTP_DATAGRAM_ACKED;
4079 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4080 				if (tp1->data) {
4081 					/* sa_ignore NO_NULL_CHK */
4082 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4083 					sctp_m_freem(tp1->data);
4084 				}
4085 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4086 					sctp_log_sack(asoc->last_acked_seq,
4087 					    cumack,
4088 					    tp1->rec.data.TSN_seq,
4089 					    0,
4090 					    0,
4091 					    SCTP_LOG_FREE_SENT);
4092 				}
4093 				tp1->data = NULL;
4094 				asoc->sent_queue_cnt--;
4095 				sctp_free_a_chunk(stcb, tp1);
4096 				tp1 = tp2;
4097 			} else {
4098 				break;
4099 			}
4100 		}
4101 
4102 	}
4103 	/* sa_ignore NO_NULL_CHK */
4104 	if (stcb->sctp_socket) {
4105 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4106 		struct socket *so;
4107 
4108 #endif
4109 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4110 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4111 			/* sa_ignore NO_NULL_CHK */
4112 			sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
4113 		}
4114 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4115 		so = SCTP_INP_SO(stcb->sctp_ep);
4116 		atomic_add_int(&stcb->asoc.refcnt, 1);
4117 		SCTP_TCB_UNLOCK(stcb);
4118 		SCTP_SOCKET_LOCK(so, 1);
4119 		SCTP_TCB_LOCK(stcb);
4120 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4121 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4122 			/* assoc was freed while we were unlocked */
4123 			SCTP_SOCKET_UNLOCK(so, 1);
4124 			return;
4125 		}
4126 #endif
4127 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4128 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4129 		SCTP_SOCKET_UNLOCK(so, 1);
4130 #endif
4131 	} else {
4132 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4133 			sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
4134 		}
4135 	}
4136 
4137 	/* JRS - Use the congestion control given in the CC module */
4138 	if (asoc->last_acked_seq != cumack)
4139 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4140 
4141 	asoc->last_acked_seq = cumack;
4142 
4143 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4144 		/* nothing left in-flight */
4145 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4146 			net->flight_size = 0;
4147 			net->partial_bytes_acked = 0;
4148 		}
4149 		asoc->total_flight = 0;
4150 		asoc->total_flight_count = 0;
4151 	}
4152 	/* ECN Nonce updates */
4153 	if (asoc->ecn_nonce_allowed) {
4154 		if (asoc->nonce_sum_check) {
4155 			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
4156 				if (asoc->nonce_wait_for_ecne == 0) {
4157 					struct sctp_tmit_chunk *lchk;
4158 
4159 					lchk = TAILQ_FIRST(&asoc->send_queue);
4160 					asoc->nonce_wait_for_ecne = 1;
4161 					if (lchk) {
4162 						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4163 					} else {
4164 						asoc->nonce_wait_tsn = asoc->sending_seq;
4165 					}
4166 				} else {
4167 					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4168 					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4169 						/*
4170 						 * Misbehaving peer. We need
4171 						 * to react to this guy
4172 						 */
4173 						asoc->ecn_allowed = 0;
4174 						asoc->ecn_nonce_allowed = 0;
4175 					}
4176 				}
4177 			}
4178 		} else {
4179 			/* See if Resynchronization Possible */
4180 			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4181 				asoc->nonce_sum_check = 1;
4182 				/*
4183 				 * Now we must calculate what the base is.
4184 				 * We do this based on two things: we know
4185 				 * the totals for all the segments
4186 				 * gap-acked in the SACK (none). We also
4187 				 * know the SACK's nonce sum, its in
4188 				 * nonce_sum_flag. So we can build a truth
4189 				 * table to back-calculate the new value of
4190 				 * asoc->nonce_sum_expect_base:
4191 				 *
4192 				 * SACK-flag-Value  Seg-Sums  Base:
4193 				 *  0 0 -> 0,  1 0 -> 1,  0 1 -> 1,  1 1 -> 0
4194 				 * (i.e., Base = SACK-flag XOR Seg-Sums)
4195 				 */
4196 				asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
4197 			}
4198 		}
4199 	}
4200 	/* RWND update */
4201 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4202 	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4203 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4204 		/* SWS sender side engages */
4205 		asoc->peers_rwnd = 0;
4206 	}
4207 	if (asoc->peers_rwnd > old_rwnd) {
4208 		win_probe_recovery = 1;
4209 	}
4210 	/* Now assure a timer where data is queued at */
4211 again:
4212 	j = 0;
4213 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4214 		int to_ticks;
4215 
4216 		if (win_probe_recovery && (net->window_probe)) {
4217 			win_probe_recovered = 1;
4218 			/*
4219 			 * Find the first chunk that was used for a window
4220 			 * probe and clear its sent state
4221 			 */
4222 			/* sa_ignore FREED_MEMORY */
4223 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4224 				if (tp1->window_probe) {
4225 					/* move back to data send queue */
4226 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
4227 					break;
4228 				}
4229 			}
4230 		}
4231 		if (net->RTO == 0) {
4232 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4233 		} else {
4234 			to_ticks = MSEC_TO_TICKS(net->RTO);
4235 		}
4236 		if (net->flight_size) {
4237 			j++;
4238 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4239 			    sctp_timeout_handler, &net->rxt_timer);
4240 			if (net->window_probe) {
4241 				net->window_probe = 0;
4242 			}
4243 		} else {
4244 			if (net->window_probe) {
4245 				/*
4246 				 * In window probes we must assure a timer
4247 				 * is still running there
4248 				 */
4249 				net->window_probe = 0;
4250 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4251 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4252 					    sctp_timeout_handler, &net->rxt_timer);
4253 				}
4254 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4255 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4256 				    stcb, net,
4257 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4258 			}
4259 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4260 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4261 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4262 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4263 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4264 				}
4265 			}
4266 		}
4267 	}
4268 	if ((j == 0) &&
4269 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4270 	    (asoc->sent_queue_retran_cnt == 0) &&
4271 	    (win_probe_recovered == 0) &&
4272 	    (done_once == 0)) {
4273 		/*
4274 		 * huh, this should not happen unless all packets are
4275 		 * PR-SCTP and marked to skip of course.
4276 		 */
4277 		if (sctp_fs_audit(asoc)) {
4278 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4279 				net->flight_size = 0;
4280 			}
4281 			asoc->total_flight = 0;
4282 			asoc->total_flight_count = 0;
4283 			asoc->sent_queue_retran_cnt = 0;
4284 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4285 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4286 					sctp_flight_size_increase(tp1);
4287 					sctp_total_flight_increase(stcb, tp1);
4288 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4289 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4290 				}
4291 			}
4292 		}
4293 		done_once = 1;
4294 		goto again;
4295 	}
4296 	/**********************************/
4297 	/* Now what about shutdown issues */
4298 	/**********************************/
4299 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4300 		/* nothing left on sendqueue.. consider done */
4301 		/* clean up */
4302 		if ((asoc->stream_queue_cnt == 1) &&
4303 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4304 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4305 		    (asoc->locked_on_sending)
4306 		    ) {
4307 			struct sctp_stream_queue_pending *sp;
4308 
4309 			/*
4310 			 * I may be in a state where we got it all across... but
4311 			 * cannot write more due to a shutdown... we abort
4312 			 * since the user did not indicate EOR in this case.
4313 			 * The sp will be cleaned during free of the asoc.
4314 			 */
4315 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4316 			    sctp_streamhead);
4317 			if ((sp) && (sp->length == 0)) {
4318 				/* Let cleanup code purge it */
4319 				if (sp->msg_is_complete) {
4320 					asoc->stream_queue_cnt--;
4321 				} else {
4322 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4323 					asoc->locked_on_sending = NULL;
4324 					asoc->stream_queue_cnt--;
4325 				}
4326 			}
4327 		}
4328 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4329 		    (asoc->stream_queue_cnt == 0)) {
4330 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4331 				/* Need to abort here */
4332 				struct mbuf *oper;
4333 
4334 		abort_out_now:
4335 				*abort_now = 1;
4336 				/* XXX */
4337 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4338 				    0, M_DONTWAIT, 1, MT_DATA);
4339 				if (oper) {
4340 					struct sctp_paramhdr *ph;
4341 					uint32_t *ippp;
4342 
4343 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4344 					    sizeof(uint32_t);
4345 					ph = mtod(oper, struct sctp_paramhdr *);
4346 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4347 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4348 					ippp = (uint32_t *) (ph + 1);
4349 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4350 				}
4351 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4352 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4353 			} else {
4354 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4355 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4356 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4357 				}
4358 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4359 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4360 				sctp_stop_timers_for_shutdown(stcb);
4361 				sctp_send_shutdown(stcb,
4362 				    stcb->asoc.primary_destination);
4363 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4364 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4365 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4366 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4367 			}
4368 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4369 		    (asoc->stream_queue_cnt == 0)) {
4370 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4371 				goto abort_out_now;
4372 			}
4373 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4374 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4375 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4376 			sctp_send_shutdown_ack(stcb,
4377 			    stcb->asoc.primary_destination);
4378 
4379 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4380 			    stcb->sctp_ep, stcb, asoc->primary_destination);
4381 		}
4382 	}
4383 	/*********************************************/
4384 	/* Here we perform PR-SCTP procedures        */
4385 	/* (section 4.2)                             */
4386 	/*********************************************/
4387 	/* C1. update advancedPeerAckPoint */
4388 	if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4389 		asoc->advanced_peer_ack_point = cumack;
4390 	}
4391 	/* PR-Sctp issues need to be addressed too */
4392 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4393 		struct sctp_tmit_chunk *lchk;
4394 		uint32_t old_adv_peer_ack_point;
4395 
4396 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4397 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4398 		/* C3. See if we need to send a Fwd-TSN */
4399 		if (compare_with_wrap(asoc->advanced_peer_ack_point, cumack,
4400 		    MAX_TSN)) {
4401 			/*
4402 			 * ISSUE with ECN, see FWD-TSN processing for notes
4403 			 * on issues that will occur when the ECN NONCE
4404 			 * stuff is put into SCTP for cross checking.
4405 			 */
4406 			if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
4407 			    MAX_TSN)) {
4408 				send_forward_tsn(stcb, asoc);
4409 				/*
4410 				 * ECN Nonce: Disable Nonce Sum check when
4411 				 * FWD TSN is sent and store resync tsn
4412 				 */
4413 				asoc->nonce_sum_check = 0;
4414 				asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
4415 			} else if (lchk) {
4416 				/* try to FR fwd-tsn's that get lost too */
4417 				lchk->rec.data.fwd_tsn_cnt++;
4418 				if (lchk->rec.data.fwd_tsn_cnt > 3) {
4419 					send_forward_tsn(stcb, asoc);
4420 					lchk->rec.data.fwd_tsn_cnt = 0;
4421 				}
4422 			}
4423 		}
4424 		if (lchk) {
4425 			/* Assure a timer is up */
4426 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4427 			    stcb->sctp_ep, stcb, lchk->whoTo);
4428 		}
4429 	}
4430 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4431 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4432 		    rwnd,
4433 		    stcb->asoc.peers_rwnd,
4434 		    stcb->asoc.total_flight,
4435 		    stcb->asoc.total_output_queue_size);
4436 	}
4437 }
4438 
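/*
 * Editor's sketch (illustrative, not built): the express path above
 * only covers a pure cumulative ack.  A caller is expected to fall
 * back to the full sctp_handle_sack() below as soon as gap-ack or
 * duplicate-TSN reports are present, along these lines (fragment):
 */
#if 0
	if ((num_seg == 0) && (num_nr_seg == 0) && (num_dup == 0))
		sctp_express_handle_sack(stcb, cum_ack, a_rwnd,
		    nonce_sum_flag, &abort_now);
	else
		sctp_handle_sack(m, offset_seg, offset_dup, stcb, net_from,
		    num_seg, num_nr_seg, num_dup, &abort_now, flags,
		    cum_ack, a_rwnd);
#endif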
4439 void
4440 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4441     struct sctp_tcb *stcb, struct sctp_nets *net_from,
4442     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4443     int *abort_now, uint8_t flags,
4444     uint32_t cum_ack, uint32_t rwnd)
4445 {
4446 	struct sctp_association *asoc;
4447 	struct sctp_tmit_chunk *tp1, *tp2;
4448 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4449 	uint32_t sav_cum_ack;
4450 	uint16_t wake_him = 0;
4451 	uint32_t send_s = 0;
4452 	long j;
4453 	int accum_moved = 0;
4454 	int will_exit_fast_recovery = 0;
4455 	uint32_t a_rwnd, old_rwnd;
4456 	int win_probe_recovery = 0;
4457 	int win_probe_recovered = 0;
4458 	struct sctp_nets *net = NULL;
4459 	int nonce_sum_flag, ecn_seg_sums = 0;
4460 	int done_once;
4461 	uint8_t reneged_all = 0;
4462 	uint8_t cmt_dac_flag;
4463 
4464 	/*
4465 	 * we take any chance we can to service our queues since we cannot
4466 	 * get awoken when the socket is read from :<
4467 	 */
4468 	/*
4469 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4470 	 * old sack, if so discard. 2) If there is nothing left in the send
4471 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4472 	 * too, update any rwnd change and verify no timers are running.
4473 	 * then return. 3) Process any new consecutive data, i.e. cum-ack
4474 	 * moved process these first and note that it moved. 4) Process any
4475 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4476 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4477 	 * sync up flightsizes and things, stop all timers and also check
4478 	 * for shutdown_pending state. If so then go ahead and send off the
4479 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4480 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4481 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4482 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4483 	 * if in shutdown_recv state.
4484 	 */
4485 	SCTP_TCB_LOCK_ASSERT(stcb);
4486 	/* CMT DAC algo */
4487 	this_sack_lowest_newack = 0;
4488 	j = 0;
4489 	SCTP_STAT_INCR(sctps_slowpath_sack);
4490 	last_tsn = cum_ack;
4491 	nonce_sum_flag = flags & SCTP_SACK_NONCE_SUM;
4492 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4493 #ifdef SCTP_ASOCLOG_OF_TSNS
4494 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4495 	stcb->asoc.cumack_log_at++;
4496 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4497 		stcb->asoc.cumack_log_at = 0;
4498 	}
4499 #endif
4500 	a_rwnd = rwnd;
4501 
4502 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4503 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4504 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4505 	}
4506 	old_rwnd = stcb->asoc.peers_rwnd;
4507 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4508 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4509 		    stcb->asoc.overall_error_count,
4510 		    0,
4511 		    SCTP_FROM_SCTP_INDATA,
4512 		    __LINE__);
4513 	}
4514 	stcb->asoc.overall_error_count = 0;
4515 	asoc = &stcb->asoc;
4516 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4517 		sctp_log_sack(asoc->last_acked_seq,
4518 		    cum_ack,
4519 		    0,
4520 		    num_seg,
4521 		    num_dup,
4522 		    SCTP_LOG_NEW_SACK);
4523 	}
4524 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
4525 		uint16_t i;
4526 		uint32_t *dupdata, dblock;
4527 
4528 		for (i = 0; i < num_dup; i++) {
4529 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4530 			    sizeof(uint32_t), (uint8_t *) & dblock);
4531 			if (dupdata == NULL) {
4532 				break;
4533 			}
4534 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4535 		}
4536 	}
4537 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4538 		/* reality check */
4539 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4540 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4541 			    sctpchunk_listhead);
4542 			send_s = tp1->rec.data.TSN_seq + 1;
4543 		} else {
4544 			tp1 = NULL;
4545 			send_s = asoc->sending_seq;
4546 		}
4547 		if (cum_ack == send_s ||
4548 		    compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
4549 			struct mbuf *oper;
4550 
4551 			/*
4552 			 * no way, we have not even sent this TSN out yet.
4553 			 * Peer is hopelessly messed up with us.
4554 			 */
4555 			printf("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4556 			    cum_ack, send_s);
4557 			if (tp1) {
4558 				printf("Got send_s from tsn:%x + 1 of tp1:%p\n",
4559 				    tp1->rec.data.TSN_seq, tp1);
4560 			}
4561 	hopeless_peer:
4562 			*abort_now = 1;
4563 			/* XXX */
4564 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4565 			    0, M_DONTWAIT, 1, MT_DATA);
4566 			if (oper) {
4567 				struct sctp_paramhdr *ph;
4568 				uint32_t *ippp;
4569 
4570 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4571 				    sizeof(uint32_t);
4572 				ph = mtod(oper, struct sctp_paramhdr *);
4573 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4574 				ph->param_length = htons(SCTP_BUF_LEN(oper));
4575 				ippp = (uint32_t *) (ph + 1);
4576 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4577 			}
4578 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4579 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4580 			return;
4581 		}
4582 	}
4583 	/**********************/
4584 	/* 1) check the range */
4585 	/**********************/
4586 	if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
4587 		/* acking something behind */
4588 		return;
4589 	}
4590 	sav_cum_ack = asoc->last_acked_seq;
4591 
4592 	/* update the Rwnd of the peer */
4593 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4594 	    TAILQ_EMPTY(&asoc->send_queue) &&
4595 	    (asoc->stream_queue_cnt == 0)) {
4596 		/* nothing left on send/sent and strmq */
4597 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4598 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4599 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4600 		}
4601 		asoc->peers_rwnd = a_rwnd;
4602 		if (asoc->sent_queue_retran_cnt) {
4603 			asoc->sent_queue_retran_cnt = 0;
4604 		}
4605 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4606 			/* SWS sender side engages */
4607 			asoc->peers_rwnd = 0;
4608 		}
4609 		/* stop any timers */
4610 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4611 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4612 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4613 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4614 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4615 					SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
4616 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4617 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4618 				}
4619 			}
4620 			net->partial_bytes_acked = 0;
4621 			net->flight_size = 0;
4622 		}
4623 		asoc->total_flight = 0;
4624 		asoc->total_flight_count = 0;
4625 		return;
4626 	}
4627 	/*
4628 	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4629 	 * things. The total byte count acked is tracked in netAckSz AND
4630 	 * netAck2 is used to track the total bytes acked that are
4631 	 * unambiguous and were never retransmitted. We track these on a per
4632 	 * destination address basis.
4633 	 */
4634 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4635 		net->prev_cwnd = net->cwnd;
4636 		net->net_ack = 0;
4637 		net->net_ack2 = 0;
4638 
4639 		/*
4640 		 * CMT: Reset CUC and Fast recovery algo variables before
4641 		 * SACK processing
4642 		 */
4643 		net->new_pseudo_cumack = 0;
4644 		net->will_exit_fast_recovery = 0;
4645 	}
4646 	/* process the new consecutive TSN first */
4647 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
4648 	while (tp1) {
4649 		if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
4650 		    MAX_TSN) ||
4651 		    last_tsn == tp1->rec.data.TSN_seq) {
4652 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4653 				/*
4654 				 * ECN Nonce: Add the nonce to the sender's
4655 				 * nonce sum
4656 				 */
4657 				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4658 				accum_moved = 1;
4659 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4660 					/*
4661 					 * If it is less than ACKED, it is
4662 					 * now no-longer in flight. Higher
4663 					 * values may occur during marking
4664 					 */
4665 					if ((tp1->whoTo->dest_state &
4666 					    SCTP_ADDR_UNCONFIRMED) &&
4667 					    (tp1->snd_count < 2)) {
4668 						/*
4669 						 * If there was no retran
4670 						 * and the address is
4671 						 * un-confirmed and we sent
4672 						 * there and are now
4673 						 * sacked.. its confirmed,
4674 						 * mark it so.
4675 						 */
4676 						tp1->whoTo->dest_state &=
4677 						    ~SCTP_ADDR_UNCONFIRMED;
4678 					}
4679 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4680 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4681 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4682 							    tp1->whoTo->flight_size,
4683 							    tp1->book_size,
4684 							    (uintptr_t) tp1->whoTo,
4685 							    tp1->rec.data.TSN_seq);
4686 						}
4687 						sctp_flight_size_decrease(tp1);
4688 						sctp_total_flight_decrease(stcb, tp1);
4689 					}
4690 					tp1->whoTo->net_ack += tp1->send_size;
4691 
4692 					/* CMT SFR and DAC algos */
4693 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4694 					tp1->whoTo->saw_newack = 1;
4695 
4696 					if (tp1->snd_count < 2) {
4697 						/*
4698 						 * True non-retransmitted
4699 						 * chunk
4700 						 */
4701 						tp1->whoTo->net_ack2 +=
4702 						    tp1->send_size;
4703 
4704 						/* update RTO too? */
4705 						if (tp1->do_rtt) {
4706 							tp1->whoTo->RTO =
4707 							    sctp_calculate_rto(stcb,
4708 							    asoc, tp1->whoTo,
4709 							    &tp1->sent_rcv_time,
4710 							    sctp_align_safe_nocopy);
4711 							tp1->do_rtt = 0;
4712 						}
4713 					}
4714 					/*
4715 					 * CMT: CUCv2 algorithm. From the
4716 					 * cumack'd TSNs, for each TSN being
4717 					 * acked for the first time, set the
4718 					 * following variables for the
4719 					 * corresp destination.
4720 					 * new_pseudo_cumack will trigger a
4721 					 * cwnd update.
4722 					 * find_(rtx_)pseudo_cumack will
4723 					 * trigger search for the next
4724 					 * expected (rtx-)pseudo-cumack.
4725 					 */
4726 					tp1->whoTo->new_pseudo_cumack = 1;
4727 					tp1->whoTo->find_pseudo_cumack = 1;
4728 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4729 
4730 
4731 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4732 						sctp_log_sack(asoc->last_acked_seq,
4733 						    cum_ack,
4734 						    tp1->rec.data.TSN_seq,
4735 						    0,
4736 						    0,
4737 						    SCTP_LOG_TSN_ACKED);
4738 					}
4739 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4740 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4741 					}
4742 				}
4743 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4744 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4745 #ifdef SCTP_AUDITING_ENABLED
4746 					sctp_audit_log(0xB3,
4747 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4748 #endif
4749 				}
4750 				if (tp1->rec.data.chunk_was_revoked) {
4751 					/* deflate the cwnd */
4752 					tp1->whoTo->cwnd -= tp1->book_size;
4753 					tp1->rec.data.chunk_was_revoked = 0;
4754 				}
4755 				tp1->sent = SCTP_DATAGRAM_ACKED;
4756 			}
4757 		} else {
4758 			break;
4759 		}
4760 		tp1 = TAILQ_NEXT(tp1, sctp_next);
4761 	}
4762 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4763 	/* always set this up to cum-ack */
4764 	asoc->this_sack_highest_gap = last_tsn;
4765 
4766 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4767 
4768 		/*
4769 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4770 		 * to be greater than the cumack. Also reset saw_newack to 0
4771 		 * for all dests.
4772 		 */
4773 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4774 			net->saw_newack = 0;
4775 			net->this_sack_highest_newack = last_tsn;
4776 		}
4777 
4778 		/*
4779 		 * thisSackHighestGap will increase while handling NEW
4780 		 * segments. this_sack_highest_newack will increase while
4781 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4782 		 * used for the CMT DAC algo. saw_newack will also change.
4783 		 */
4784 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4785 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4786 		    num_seg, num_nr_seg, &ecn_seg_sums)) {
4787 			wake_him++;
4788 		}
4789 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4790 			/*
4791 			 * validate the biggest_tsn_acked in the gap acks if
4792 			 * strict adherence is wanted.
4793 			 */
4794 			if ((biggest_tsn_acked == send_s) ||
4795 			    (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
4796 				/*
4797 				 * peer is either confused or we are under
4798 				 * attack. We must abort.
4799 				 */
4800 				SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4801 				    biggest_tsn_acked,
4802 				    send_s);
4803 
4804 				goto hopeless_peer;
4805 			}
4806 		}
4807 	}
4808 	/********************************************/
4809 	/* cancel ALL T3-send timers if accum moved */
4810 	/********************************************/
4811 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
4812 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4813 			if (net->new_pseudo_cumack)
4814 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4815 				    stcb, net,
4816 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4817 
4818 		}
4819 	} else {
4820 		if (accum_moved) {
4821 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4822 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4823 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4824 			}
4825 		}
4826 	}
4827 	/********************************************/
4828 	/* drop the acked chunks from the sendqueue */
4829 	/********************************************/
4830 	asoc->last_acked_seq = cum_ack;
4831 
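	/*
	 * Walk the sent queue and free every chunk at or below the
	 * cumulative ack; compare_with_wrap() handles TSN wrap-around.
	 */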
4832 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
4833 	if (tp1 == NULL)
4834 		goto done_with_it;
4835 	do {
4836 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
4837 		    MAX_TSN)) {
4838 			break;
4839 		}
4840 		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4841 			/* no more sent on list */
4842 			SCTP_PRINTF("Warning, tp1->sent == %d and it's now acked?\n",
4843 			    tp1->sent);
4844 		}
4845 		tp2 = TAILQ_NEXT(tp1, sctp_next);
4846 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4847 		if (tp1->pr_sctp_on) {
4848 			if (asoc->pr_sctp_cnt != 0)
4849 				asoc->pr_sctp_cnt--;
4850 		}
4851 		if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
4852 		    (asoc->total_flight > 0)) {
4853 #ifdef INVARIANTS
4854 			panic("Warning flight size is positive and should be 0");
4855 #else
4856 			SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4857 			    asoc->total_flight);
4858 #endif
4859 			asoc->total_flight = 0;
4860 		}
4861 		if (tp1->data) {
4862 			/* sa_ignore NO_NULL_CHK */
4863 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4864 			sctp_m_freem(tp1->data);
4865 			if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4866 				asoc->sent_queue_cnt_removeable--;
4867 			}
4868 		}
4869 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4870 			sctp_log_sack(asoc->last_acked_seq,
4871 			    cum_ack,
4872 			    tp1->rec.data.TSN_seq,
4873 			    0,
4874 			    0,
4875 			    SCTP_LOG_FREE_SENT);
4876 		}
4877 		tp1->data = NULL;
4878 		asoc->sent_queue_cnt--;
4879 		sctp_free_a_chunk(stcb, tp1);
4880 		wake_him++;
4881 		tp1 = tp2;
4882 	} while (tp1 != NULL);
4883 
4884 done_with_it:
4885 	/* sa_ignore NO_NULL_CHK */
4886 	if ((wake_him) && (stcb->sctp_socket)) {
4887 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4888 		struct socket *so;
4889 
4890 #endif
4891 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4892 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4893 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
4894 		}
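		/*
		 * The socket lock must be taken before the TCB lock here:
		 * hold a reference, drop the TCB lock, lock the socket,
		 * re-lock the TCB, and recheck that the association was
		 * not freed in between.
		 */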
4895 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4896 		so = SCTP_INP_SO(stcb->sctp_ep);
4897 		atomic_add_int(&stcb->asoc.refcnt, 1);
4898 		SCTP_TCB_UNLOCK(stcb);
4899 		SCTP_SOCKET_LOCK(so, 1);
4900 		SCTP_TCB_LOCK(stcb);
4901 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4902 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4903 			/* assoc was freed while we were unlocked */
4904 			SCTP_SOCKET_UNLOCK(so, 1);
4905 			return;
4906 		}
4907 #endif
4908 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4909 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4910 		SCTP_SOCKET_UNLOCK(so, 1);
4911 #endif
4912 	} else {
4913 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4914 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
4915 		}
4916 	}
4917 
4918 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4919 		if (compare_with_wrap(asoc->last_acked_seq,
4920 		    asoc->fast_recovery_tsn, MAX_TSN) ||
4921 		    asoc->last_acked_seq == asoc->fast_recovery_tsn) {
4922 			/* Setup so we will exit RFC2582 fast recovery */
4923 			will_exit_fast_recovery = 1;
4924 		}
4925 	}
4926 	/*
4927 	 * Check for revoked fragments:
4928 	 *
4929 	 * If the previous SACK had no frags, nothing can have been revoked.
4930 	 * If the previous SACK had frags and we now have frags too (i.e.
4931 	 * num_seg > 0), call sctp_check_for_revoked() to tell whether the
4932 	 * peer revoked some of them. Otherwise the peer revoked all ACKED
4933 	 * fragments, since we had some before and now have NONE.
4934 	 */
4935 
4936 	if (num_seg)
4937 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4938 	else if (asoc->saw_sack_with_frags) {
4939 		int cnt_revoked = 0;
4940 
4941 		tp1 = TAILQ_FIRST(&asoc->sent_queue);
4942 		if (tp1 != NULL) {
4943 			/* Peer revoked all dg's marked or acked */
4944 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4945 				if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4946 					tp1->sent = SCTP_DATAGRAM_SENT;
4947 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4948 						sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4949 						    tp1->whoTo->flight_size,
4950 						    tp1->book_size,
4951 						    (uintptr_t) tp1->whoTo,
4952 						    tp1->rec.data.TSN_seq);
4953 					}
4954 					sctp_flight_size_increase(tp1);
4955 					sctp_total_flight_increase(stcb, tp1);
4956 					tp1->rec.data.chunk_was_revoked = 1;
4957 					/*
4958 					 * To ensure that this increase in
4959 					 * flightsize, which is artificial,
4960 					 * does not throttle the sender, we
4961 					 * also increase the cwnd
4962 					 * artificially.
4963 					 */
4964 					tp1->whoTo->cwnd += tp1->book_size;
4965 					cnt_revoked++;
4966 				}
4967 			}
4968 			if (cnt_revoked) {
4969 				reneged_all = 1;
4970 			}
4971 		}
4972 		asoc->saw_sack_with_frags = 0;
4973 	}
4974 	if (num_seg || num_nr_seg)
4975 		asoc->saw_sack_with_frags = 1;
4976 	else
4977 		asoc->saw_sack_with_frags = 0;
4978 
4979 	/* JRS - Use the congestion control given in the CC module */
4980 	asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4981 
4982 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4983 		/* nothing left in-flight */
4984 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4985 			/* stop all timers */
4986 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4987 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4988 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4989 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4990 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4991 				}
4992 			}
4993 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4994 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4995 			net->flight_size = 0;
4996 			net->partial_bytes_acked = 0;
4997 		}
4998 		asoc->total_flight = 0;
4999 		asoc->total_flight_count = 0;
5000 	}
5001 	/**********************************/
5002 	/* Now what about shutdown issues */
5003 	/**********************************/
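	/*
	 * With both the send and sent queues drained, the association can
	 * progress the shutdown sequence, or abort if a partial message
	 * is stranded.
	 */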
5004 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5005 		/* nothing left on sendqueue.. consider done */
5006 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5007 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5008 			    asoc->peers_rwnd, 0, 0, a_rwnd);
5009 		}
5010 		asoc->peers_rwnd = a_rwnd;
5011 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5012 			/* SWS sender side engages */
5013 			asoc->peers_rwnd = 0;
5014 		}
5015 		/* clean up */
5016 		if ((asoc->stream_queue_cnt == 1) &&
5017 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5018 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5019 		    (asoc->locked_on_sending)
5020 		    ) {
5021 			struct sctp_stream_queue_pending *sp;
5022 
5023 			/*
5024 			 * I may be in a state where everything got across but
5025 			 * we cannot write more due to a shutdown. We abort
5026 			 * since the user did not indicate EOR in this case.
5027 			 */
5028 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
5029 			    sctp_streamhead);
5030 			if ((sp) && (sp->length == 0)) {
5031 				asoc->locked_on_sending = NULL;
5032 				if (sp->msg_is_complete) {
5033 					asoc->stream_queue_cnt--;
5034 				} else {
5035 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
5036 					asoc->stream_queue_cnt--;
5037 				}
5038 			}
5039 		}
5040 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5041 		    (asoc->stream_queue_cnt == 0)) {
5042 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5043 				/* Need to abort here */
5044 				struct mbuf *oper;
5045 
5046 		abort_out_now:
5047 				*abort_now = 1;
5048 				/* XXX */
5049 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
5050 				    0, M_DONTWAIT, 1, MT_DATA);
5051 				if (oper) {
5052 					struct sctp_paramhdr *ph;
5053 					uint32_t *ippp;
5054 
5055 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5056 					    sizeof(uint32_t);
5057 					ph = mtod(oper, struct sctp_paramhdr *);
5058 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
5059 					ph->param_length = htons(SCTP_BUF_LEN(oper));
5060 					ippp = (uint32_t *) (ph + 1);
5061 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
5062 				}
5063 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
5064 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
5065 				return;
5066 			} else {
5067 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5068 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5069 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5070 				}
5071 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5072 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5073 				sctp_stop_timers_for_shutdown(stcb);
5074 				sctp_send_shutdown(stcb,
5075 				    stcb->asoc.primary_destination);
5076 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5077 				    stcb->sctp_ep, stcb, asoc->primary_destination);
5078 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5079 				    stcb->sctp_ep, stcb, asoc->primary_destination);
5080 			}
5081 			return;
5082 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5083 		    (asoc->stream_queue_cnt == 0)) {
5084 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5085 				goto abort_out_now;
5086 			}
5087 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5088 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5089 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5090 			sctp_send_shutdown_ack(stcb,
5091 			    stcb->asoc.primary_destination);
5092 
5093 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5094 			    stcb->sctp_ep, stcb, asoc->primary_destination);
5095 			return;
5096 		}
5097 	}
5098 	/*
5099 	 * Now here we are going to recycle net_ack for a different use...
5100 	 * HEADS UP.
5101 	 */
5102 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5103 		net->net_ack = 0;
5104 	}
5105 
5106 	/*
5107 	 * CMT DAC algorithm: If the SACK DAC flag was 0, no extra marking is
5108 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5109 	 * automatically ensure that.
5110 	 */
5111 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
5112 		this_sack_lowest_newack = cum_ack;
5113 	}
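	/*
	 * With gap reports present, strike the chunks that were passed
	 * over so enough strikes can trigger fast retransmit.
	 */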
5114 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5115 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5116 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5117 	}
5118 	/* JRS - Use the congestion control given in the CC module */
5119 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5120 
5121 	/******************************************************************
5122 	 *  Here we do the stuff with ECN Nonce checking.
5123 	 *  We basically check to see if the nonce sum flag was incorrect
5124 	 *  or if resynchronization needs to be done. Also if we catch a
5125 	 *  misbehaving receiver we give him the kick.
5126 	 ******************************************************************/
5127 
5128 	if (asoc->ecn_nonce_allowed) {
5129 		if (asoc->nonce_sum_check) {
5130 			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
5131 				if (asoc->nonce_wait_for_ecne == 0) {
5132 					struct sctp_tmit_chunk *lchk;
5133 
5134 					lchk = TAILQ_FIRST(&asoc->send_queue);
5135 					asoc->nonce_wait_for_ecne = 1;
5136 					if (lchk) {
5137 						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
5138 					} else {
5139 						asoc->nonce_wait_tsn = asoc->sending_seq;
5140 					}
5141 				} else {
5142 					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
5143 					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
5144 						/*
5145 						 * Misbehaving peer. We need
5146 						 * to react to this guy
5147 						 */
5148 						asoc->ecn_allowed = 0;
5149 						asoc->ecn_nonce_allowed = 0;
5150 					}
5151 				}
5152 			}
5153 		} else {
5154 			/* See if Resynchronization Possible */
5155 			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
5156 				asoc->nonce_sum_check = 1;
5157 				/*
5158 				 * Now we must calculate what the base is.
5159 				 * We know the totals for the segments
5160 				 * gap-acked in this SACK (ecn_seg_sums)
5161 				 * and the SACK's nonce sum flag
5162 				 * (nonce_sum_flag), so the new value of
5163 				 * asoc->nonce_sum_expect_base follows
5164 				 * from this truth table (base = flag XOR seg-sums):
5165 				 *
5166 				 * SACK-flag-Value  Seg-Sums  Base
5167 				 *        0             0      0
5168 				 *        0             1      1
5169 				 *        1             0      1
5170 				 *        1             1      0
5171 				 */
5172 				asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
5173 			}
5174 		}
5175 	}
5176 	/* Now are we exiting loss recovery ? */
5177 	if (will_exit_fast_recovery) {
5178 		/* Ok, we must exit fast recovery */
5179 		asoc->fast_retran_loss_recovery = 0;
5180 	}
5181 	if ((asoc->sat_t3_loss_recovery) &&
5182 	    ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
5183 	    MAX_TSN) ||
5184 	    (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
5185 		/* end satellite t3 loss recovery */
5186 		asoc->sat_t3_loss_recovery = 0;
5187 	}
5188 	/*
5189 	 * CMT Fast recovery
5190 	 */
5191 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5192 		if (net->will_exit_fast_recovery) {
5193 			/* Ok, we must exit fast recovery */
5194 			net->fast_retran_loss_recovery = 0;
5195 		}
5196 	}
5197 
5198 	/* Adjust and set the new rwnd value */
5199 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5200 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5201 		    asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5202 	}
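	/*
	 * The usable peer rwnd is what was advertised minus what is still
	 * outstanding (flight plus per-chunk overhead); sctp_sbspace_sub()
	 * keeps the result from going negative.
	 */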
5203 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5204 	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5205 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5206 		/* SWS sender side engages */
5207 		asoc->peers_rwnd = 0;
5208 	}
5209 	if (asoc->peers_rwnd > old_rwnd) {
5210 		win_probe_recovery = 1;
5211 	}
5212 	/*
5213 	 * Now we must setup so we have a timer up for anyone with
5214 	 * outstanding data.
5215 	 */
5216 	done_once = 0;
5217 again:
5218 	j = 0;
5219 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5220 		if (win_probe_recovery && (net->window_probe)) {
5221 			win_probe_recovered = 1;
5222 			/*-
5223 			 * Find first chunk that was used with
5224 			 * window probe and clear the event. Put
5225 			 * it back into the send queue as if it had
5226 			 * not been sent.
5227 			 */
5228 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5229 				if (tp1->window_probe) {
5230 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
5231 					break;
5232 				}
5233 			}
5234 		}
5235 		if (net->flight_size) {
5236 			j++;
5237 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5238 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5239 				    stcb->sctp_ep, stcb, net);
5240 			}
5241 			if (net->window_probe) {
5242 				net->window_probe = 0;
5243 			}
5244 		} else {
5245 			if (net->window_probe) {
5246 				/*
5247 				 * For window probes we must ensure a
5248 				 * timer is still running there.
5249 				 */
5250 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5251 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5252 					    stcb->sctp_ep, stcb, net);
5253 
5254 				}
5255 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5256 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5257 				    stcb, net,
5258 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5259 			}
5260 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5261 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5262 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5263 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5264 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
5265 				}
5266 			}
5267 		}
5268 	}
5269 	if ((j == 0) &&
5270 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5271 	    (asoc->sent_queue_retran_cnt == 0) &&
5272 	    (win_probe_recovered == 0) &&
5273 	    (done_once == 0)) {
5274 		/*
5275 		 * huh, this should not happen unless all packets are
5276 		 * PR-SCTP and marked to be skipped, of course.
5277 		 */
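		/*
		 * Rebuild the flight bookkeeping from the sent queue:
		 * zero the counters, re-add every chunk still below
		 * RESEND, and re-count those marked for retransmit.
		 */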
5278 		if (sctp_fs_audit(asoc)) {
5279 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5280 				net->flight_size = 0;
5281 			}
5282 			asoc->total_flight = 0;
5283 			asoc->total_flight_count = 0;
5284 			asoc->sent_queue_retran_cnt = 0;
5285 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5286 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5287 					sctp_flight_size_increase(tp1);
5288 					sctp_total_flight_increase(stcb, tp1);
5289 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5290 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5291 				}
5292 			}
5293 		}
5294 		done_once = 1;
5295 		goto again;
5296 	}
5297 	/*********************************************/
5298 	/* Here we perform PR-SCTP procedures        */
5299 	/* (section 4.2)                             */
5300 	/*********************************************/
5301 	/* C1. update advancedPeerAckPoint */
5302 	if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
5303 		asoc->advanced_peer_ack_point = cum_ack;
5304 	}
5305 	/* C2. try to further move advancedPeerAckPoint ahead */
5306 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5307 		struct sctp_tmit_chunk *lchk;
5308 		uint32_t old_adv_peer_ack_point;
5309 
5310 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5311 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5312 		/* C3. See if we need to send a Fwd-TSN */
5313 		if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
5314 		    MAX_TSN)) {
5315 			/*
5316 			 * ISSUE with ECN, see FWD-TSN processing for notes
5317 			 * on issues that will occur when the ECN NONCE
5318 			 * stuff is put into SCTP for cross checking.
5319 			 */
5320 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5321 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5322 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5323 				    old_adv_peer_ack_point);
5324 			}
5325 			if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
5326 			    MAX_TSN)) {
5327 				send_forward_tsn(stcb, asoc);
5328 				/*
5329 				 * ECN Nonce: Disable Nonce Sum check when
5330 				 * FWD TSN is sent and store resync tsn
5331 				 */
5332 				asoc->nonce_sum_check = 0;
5333 				asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
5334 			} else if (lchk) {
5335 				/* try to FR fwd-tsn's that get lost too */
5336 				lchk->rec.data.fwd_tsn_cnt++;
5337 				if (lchk->rec.data.fwd_tsn_cnt > 3) {
5338 					send_forward_tsn(stcb, asoc);
5339 					lchk->rec.data.fwd_tsn_cnt = 0;
5340 				}
5341 			}
5342 		}
5343 		if (lchk) {
5344 			/* Assure a timer is up */
5345 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5346 			    stcb->sctp_ep, stcb, lchk->whoTo);
5347 		}
5348 	}
5349 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5350 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5351 		    a_rwnd,
5352 		    stcb->asoc.peers_rwnd,
5353 		    stcb->asoc.total_flight,
5354 		    stcb->asoc.total_output_queue_size);
5355 	}
5356 }
5357 
5358 void
5359 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5360     struct sctp_nets *netp, int *abort_flag)
5361 {
5362 	/* Copy cum-ack */
5363 	uint32_t cum_ack, a_rwnd;
5364 
5365 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5366 	/* Arrange so a_rwnd does NOT change */
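	/*
	 * peers_rwnd + total_flight reconstructs the peer's last
	 * advertised window, so the recomputation in the sack handler
	 * leaves peers_rwnd where it was.
	 */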
5367 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5368 
5369 	/* Now call the express sack handling */
5370 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
5371 }
5372 
5373 static void
5374 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5375     struct sctp_stream_in *strmin)
5376 {
5377 	struct sctp_queued_to_read *ctl, *nctl;
5378 	struct sctp_association *asoc;
5379 	uint16_t tt;
5380 
5381 	asoc = &stcb->asoc;
5382 	tt = strmin->last_sequence_delivered;
5383 	/*
5384 	 * First deliver anything prior to and including the stream sequence
5385 	 * number that came in.
5386 	 */
5387 	ctl = TAILQ_FIRST(&strmin->inqueue);
5388 	while (ctl) {
5389 		nctl = TAILQ_NEXT(ctl, next);
5390 		if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
5391 		    (tt == ctl->sinfo_ssn)) {
5392 			/* this is deliverable now */
5393 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5394 			/* subtract pending on streams */
5395 			asoc->size_on_all_streams -= ctl->length;
5396 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5397 			/* deliver it to at least the delivery-q */
5398 			if (stcb->sctp_socket) {
5399 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5400 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5401 				    ctl,
5402 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5403 			}
5404 		} else {
5405 			/* no more delivery now. */
5406 			break;
5407 		}
5408 		ctl = nctl;
5409 	}
5410 	/*
5411 	 * now we must deliver things in the queue the normal way, if any
5412 	 * are now ready.
5413 	 */
5414 	tt = strmin->last_sequence_delivered + 1;
5415 	ctl = TAILQ_FIRST(&strmin->inqueue);
5416 	while (ctl) {
5417 		nctl = TAILQ_NEXT(ctl, next);
5418 		if (tt == ctl->sinfo_ssn) {
5419 			/* this is deliverable now */
5420 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5421 			/* subtract pending on streams */
5422 			asoc->size_on_all_streams -= ctl->length;
5423 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5424 			/* deliver it to at least the delivery-q */
5425 			strmin->last_sequence_delivered = ctl->sinfo_ssn;
5426 			if (stcb->sctp_socket) {
5427 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5428 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5429 				    ctl,
5430 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5431 
5432 			}
5433 			tt = strmin->last_sequence_delivered + 1;
5434 		} else {
5435 			break;
5436 		}
5437 		ctl = nctl;
5438 	}
5439 }
5440 
5441 static void
5442 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5443     struct sctp_association *asoc,
5444     uint16_t stream, uint16_t seq)
5445 {
5446 	struct sctp_tmit_chunk *chk, *at;
5447 
5448 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5449 		/* For each one on here see if we need to toss it */
5450 		/*
5451 		 * For now, large messages held on the reasmqueue that are
5452 		 * complete will be tossed too. We could in theory do more
5453 		 * work, spinning through and stopping after dumping one msg
5454 		 * (i.e. on seeing the start of a new msg at the head), and
5455 		 * call the delivery function to see if it can be delivered.
5456 		 * But for now we just dump everything on the queue.
5457 		 */
5458 		chk = TAILQ_FIRST(&asoc->reasmqueue);
5459 		while (chk) {
5460 			at = TAILQ_NEXT(chk, sctp_next);
5461 			/*
5462 			 * Do not toss it if on a different stream or marked
5463 			 * for unordered delivery, in which case the stream
5464 			 * sequence number has no meaning.
5465 			 */
5466 			if ((chk->rec.data.stream_number != stream) ||
5467 			    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5468 				chk = at;
5469 				continue;
5470 			}
5471 			if (chk->rec.data.stream_seq == seq) {
5472 				/* It needs to be tossed */
5473 				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5474 				if (compare_with_wrap(chk->rec.data.TSN_seq,
5475 				    asoc->tsn_last_delivered, MAX_TSN)) {
5476 					asoc->tsn_last_delivered =
5477 					    chk->rec.data.TSN_seq;
5478 					asoc->str_of_pdapi =
5479 					    chk->rec.data.stream_number;
5480 					asoc->ssn_of_pdapi =
5481 					    chk->rec.data.stream_seq;
5482 					asoc->fragment_flags =
5483 					    chk->rec.data.rcv_flags;
5484 				}
5485 				asoc->size_on_reasm_queue -= chk->send_size;
5486 				sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5487 
5488 				/* Clear up any stream problem */
5489 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5490 				    SCTP_DATA_UNORDERED &&
5491 				    (compare_with_wrap(chk->rec.data.stream_seq,
5492 				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
5493 				    MAX_SEQ))) {
5494 					/*
5495 					 * We must dump forward this stream's
5496 					 * sequence number if the chunk being
5497 					 * skipped is not unordered. There is
5498 					 * a chance that if the peer does not
5499 					 * include the last fragment in its
5500 					 * FWD-TSN we WILL have a problem
5501 					 * here, since you would have a
5502 					 * partial chunk in queue that may
5503 					 * not be deliverable. Also, if a
5504 					 * partial delivery API has started,
5505 					 * the user may get a partial chunk,
5506 					 * with the next read returning a
5507 					 * new chunk... really ugly, but I
5508 					 * see no way around it! Maybe a
5509 					 * notify??
5510 					 */
5511 					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5512 					    chk->rec.data.stream_seq;
5513 				}
5514 				if (chk->data) {
5515 					sctp_m_freem(chk->data);
5516 					chk->data = NULL;
5517 				}
5518 				sctp_free_a_chunk(stcb, chk);
5519 			} else if (compare_with_wrap(chk->rec.data.stream_seq, seq, MAX_SEQ)) {
5520 				/*
5521 				 * If the stream_seq is > than the purging
5522 				 * one, we are done
5523 				 */
5524 				break;
5525 			}
5526 			chk = at;
5527 		}
5528 	}
5529 }
5530 
5531 
5532 void
5533 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5534     struct sctp_forward_tsn_chunk *fwd,
5535     int *abort_flag, struct mbuf *m, int offset)
5536 {
5537 	/*
5538 	 * ISSUES that MUST be fixed for ECN! When we are the sender of the
5539 	 * forward TSN, when the SACK comes back that acknowledges the
5540 	 * FWD-TSN we must reset the NONCE sum to match correctly. This will
5541 	 * get quite tricky, since we may have sent intervening data
5542 	 * and must carefully account for what the SACK says on the nonce
5543 	 * and any gaps that are reported. This work will NOT be done here,
5544 	 * but I note it here since it is really related to PR-SCTP and
5545 	 * FWD-TSNs.
5546 	 */
5547 
5548 	/* The pr-sctp fwd tsn */
5549 	/*
5550 	 * here we will perform all the data receiver side steps for
5551 	 * processing FwdTSN, as required by the pr-sctp draft.
5552 	 *
5553 	 * Assume we get FwdTSN(x):
5554 	 * 1) update local cumTSN to x
5555 	 * 2) try to further advance cumTSN to x + others we have
5556 	 * 3) examine and update re-ordering queue on pr-in-streams
5557 	 * 4) clean up re-assembly queue
5558 	 * 5) send a sack to report where we are.
5559 	 */
5560 	struct sctp_association *asoc;
5561 	uint32_t new_cum_tsn, gap;
5562 	unsigned int i, fwd_sz, cumack_set_flag, m_size;
5563 	uint32_t str_seq;
5564 	struct sctp_stream_in *strm;
5565 	struct sctp_tmit_chunk *chk, *at;
5566 	struct sctp_queued_to_read *ctl, *sv;
5567 
5568 	cumack_set_flag = 0;
5569 	asoc = &stcb->asoc;
5570 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5571 		SCTPDBG(SCTP_DEBUG_INDATA1,
5572 		    "Bad size too small/big fwd-tsn\n");
5573 		return;
5574 	}
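	/* Map capacity in TSNs: mapping_array_size bytes of 8 TSN bits each. */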
5575 	m_size = (stcb->asoc.mapping_array_size << 3);
5576 	/*************************************************************/
5577 	/* 1. Here we update local cumTSN and shift the bitmap array */
5578 	/*************************************************************/
5579 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5580 
5581 	if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
5582 	    asoc->cumulative_tsn == new_cum_tsn) {
5583 		/* Already got there ... */
5584 		return;
5585 	}
5586 	/*
5587 	 * now we know the new TSN is more advanced, let's find the actual
5588 	 * gap
5589 	 */
5590 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5591 	asoc->cumulative_tsn = new_cum_tsn;
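	/*
	 * A gap at or past the map capacity cannot be marked TSN by TSN;
	 * after the rwnd sanity check below, both mapping arrays are
	 * wiped and restarted at the new cumulative TSN.
	 */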
5592 	if (gap >= m_size) {
5593 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5594 			struct mbuf *oper;
5595 
5596 			/*
5597 			 * out of range (of single byte chunks in the rwnd I
5598 			 * give out). This must be an attacker.
5599 			 */
5600 			*abort_flag = 1;
5601 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5602 			    0, M_DONTWAIT, 1, MT_DATA);
5603 			if (oper) {
5604 				struct sctp_paramhdr *ph;
5605 				uint32_t *ippp;
5606 
5607 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5608 				    (sizeof(uint32_t) * 3);
5609 				ph = mtod(oper, struct sctp_paramhdr *);
5610 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5611 				ph->param_length = htons(SCTP_BUF_LEN(oper));
5612 				ippp = (uint32_t *) (ph + 1);
5613 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5614 				ippp++;
5615 				*ippp = asoc->highest_tsn_inside_map;
5616 				ippp++;
5617 				*ippp = new_cum_tsn;
5618 			}
5619 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5620 			sctp_abort_an_association(stcb->sctp_ep, stcb,
5621 			    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
5622 			return;
5623 		}
5624 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5625 
5626 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5627 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5628 		asoc->highest_tsn_inside_map = new_cum_tsn;
5629 
5630 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5631 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5632 
5633 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5634 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5635 		}
5636 		asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
5637 	} else {
5638 		SCTP_TCB_LOCK_ASSERT(stcb);
5639 		for (i = 0; i <= gap; i++) {
5640 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5641 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5642 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5643 				if (compare_with_wrap(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
5644 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5645 				}
5646 			}
5647 		}
5648 	}
5649 	/*************************************************************/
5650 	/* 2. Clear up re-assembly queue                             */
5651 	/*************************************************************/
5652 	/*
5653 	 * First service it if pd-api is up, just in case we can progress it
5654 	 * forward
5655 	 */
5656 	if (asoc->fragmented_delivery_inprogress) {
5657 		sctp_service_reassembly(stcb, asoc);
5658 	}
5659 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5660 		/* For each one on here see if we need to toss it */
5661 		/*
5662 		 * For now, large messages held on the reasmqueue that are
5663 		 * complete will be tossed too. We could in theory do more
5664 		 * work, spinning through and stopping after dumping one msg
5665 		 * (i.e. on seeing the start of a new msg at the head), and
5666 		 * call the delivery function to see if it can be delivered.
5667 		 * But for now we just dump everything on the queue.
5668 		 */
5669 		chk = TAILQ_FIRST(&asoc->reasmqueue);
5670 		while (chk) {
5671 			at = TAILQ_NEXT(chk, sctp_next);
5672 			if ((compare_with_wrap(new_cum_tsn,
5673 			    chk->rec.data.TSN_seq, MAX_TSN)) ||
5674 			    (new_cum_tsn == chk->rec.data.TSN_seq)) {
5675 				/* It needs to be tossed */
5676 				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5677 				if (compare_with_wrap(chk->rec.data.TSN_seq,
5678 				    asoc->tsn_last_delivered, MAX_TSN)) {
5679 					asoc->tsn_last_delivered =
5680 					    chk->rec.data.TSN_seq;
5681 					asoc->str_of_pdapi =
5682 					    chk->rec.data.stream_number;
5683 					asoc->ssn_of_pdapi =
5684 					    chk->rec.data.stream_seq;
5685 					asoc->fragment_flags =
5686 					    chk->rec.data.rcv_flags;
5687 				}
5688 				asoc->size_on_reasm_queue -= chk->send_size;
5689 				sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5690 
5691 				/* Clear up any stream problem */
5692 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5693 				    SCTP_DATA_UNORDERED &&
5694 				    (compare_with_wrap(chk->rec.data.stream_seq,
5695 				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
5696 				    MAX_SEQ))) {
5697 					/*
5698 					 * We must dump forward this stream's
5699 					 * sequence number if the chunk being
5700 					 * skipped is not unordered. There is
5701 					 * a chance that if the peer does not
5702 					 * include the last fragment in its
5703 					 * FWD-TSN we WILL have a problem
5704 					 * here, since you would have a
5705 					 * partial chunk in queue that may
5706 					 * not be deliverable. Also, if a
5707 					 * partial delivery API has started,
5708 					 * the user may get a partial chunk,
5709 					 * with the next read returning a
5710 					 * new chunk... really ugly, but I
5711 					 * see no way around it! Maybe a
5712 					 * notify??
5713 					 */
5714 					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5715 					    chk->rec.data.stream_seq;
5716 				}
5717 				if (chk->data) {
5718 					sctp_m_freem(chk->data);
5719 					chk->data = NULL;
5720 				}
5721 				sctp_free_a_chunk(stcb, chk);
5722 			} else {
5723 				/*
5724 				 * Ok we have gone beyond the end of the
5725 				 * fwd-tsn's mark.
5726 				 */
5727 				break;
5728 			}
5729 			chk = at;
5730 		}
5731 	}
5732 	/*******************************************************/
5733 	/* 3. Update the PR-stream re-ordering queues and fix  */
5734 	/* delivery issues as needed.                           */
5735 	/*******************************************************/
5736 	fwd_sz -= sizeof(*fwd);
5737 	if (m && fwd_sz) {
5738 		/* New method. */
5739 		unsigned int num_str;
5740 		struct sctp_strseq *stseq, strseqbuf;
5741 
5742 		offset += sizeof(*fwd);
5743 
5744 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5745 		num_str = fwd_sz / sizeof(struct sctp_strseq);
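		/* Each sctp_strseq entry names a (stream, sequence) pair being skipped. */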
5746 		for (i = 0; i < num_str; i++) {
5747 			uint16_t st;
5748 
5749 			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5750 			    sizeof(struct sctp_strseq),
5751 			    (uint8_t *) & strseqbuf);
5752 			offset += sizeof(struct sctp_strseq);
5753 			if (stseq == NULL) {
5754 				break;
5755 			}
5756 			/* Convert to host byte order in place */
5757 			st = ntohs(stseq->stream);
5758 			stseq->stream = st;
5759 			st = ntohs(stseq->sequence);
5760 			stseq->sequence = st;
5761 
5762 			/* now process */
5763 
5764 			/*
5765 			 * Ok we now look for the stream/seq on the read
5766 			 * queue where it's not all delivered. If we find it,
5767 			 * we transmute the read entry into a PDI_ABORTED.
5768 			 */
5769 			if (stseq->stream >= asoc->streamincnt) {
5770 				/* screwed up streams, stop!  */
5771 				break;
5772 			}
5773 			if ((asoc->str_of_pdapi == stseq->stream) &&
5774 			    (asoc->ssn_of_pdapi == stseq->sequence)) {
5775 				/*
5776 				 * If this is the one we were partially
5777 				 * delivering now then we no longer are.
5778 				 * Note this will change with the reassembly
5779 				 * re-write.
5780 				 */
5781 				asoc->fragmented_delivery_inprogress = 0;
5782 			}
5783 			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5784 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5785 				if ((ctl->sinfo_stream == stseq->stream) &&
5786 				    (ctl->sinfo_ssn == stseq->sequence)) {
5787 					str_seq = (stseq->stream << 16) | stseq->sequence;
5788 					ctl->end_added = 1;
5789 					ctl->pdapi_aborted = 1;
5790 					sv = stcb->asoc.control_pdapi;
5791 					stcb->asoc.control_pdapi = ctl;
5792 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5793 					    stcb,
5794 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5795 					    (void *)&str_seq,
5796 					    SCTP_SO_NOT_LOCKED);
5797 					stcb->asoc.control_pdapi = sv;
5798 					break;
5799 				} else if ((ctl->sinfo_stream == stseq->stream) &&
5800 				    (compare_with_wrap(ctl->sinfo_ssn, stseq->sequence, MAX_SEQ))) {
5801 					/* We are past our victim SSN */
5802 					break;
5803 				}
5804 			}
5805 			strm = &asoc->strmin[stseq->stream];
5806 			if (compare_with_wrap(stseq->sequence,
5807 			    strm->last_sequence_delivered, MAX_SEQ)) {
5808 				/* Update the sequence number */
5809 				strm->last_sequence_delivered =
5810 				    stseq->sequence;
5811 			}
5812 			/* now kick the stream the new way */
5813 			/* sa_ignore NO_NULL_CHK */
5814 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5815 		}
5816 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5817 	}
5818 	/*
5819 	 * Now slide things forward.
5820 	 */
5821 	sctp_slide_mapping_arrays(stcb);
5822 
5823 	if (TAILQ_FIRST(&asoc->reasmqueue)) {
5824 		/* now lets kick out and check for more fragmented delivery */
5825 		/* sa_ignore NO_NULL_CHK */
5826 		sctp_deliver_reasm_check(stcb, &stcb->asoc);
5827 	}
5828 }
5829