/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>


/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it), for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */

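/* Recompute the advertised receive window and store it in asoc->my_rwnd. */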
void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-to-many socket,
	 * since sb_cc is the count that everyone has put up. When we
	 * rewrite sctp_soreceive then we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * Take out what has NOT been put on the socket queue and which we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));

	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnds */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0. SWS avoidance engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}


/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


/*
 * Build out our readq entry based on a chunk from the reassembly queue.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


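/*
 * Build an mbuf carrying the cmsg (SCTP_SNDRCV, or SCTP_EXTRCV when
 * extended receive info is enabled) that describes a received message
 * to the application; returns NULL if the user did not ask for it or
 * no mbuf is available.
 */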
struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}

	ret = sctp_get_mbuf_for_msg(len,
	    0, M_DONTWAIT, 1, MT_DATA);

	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		memcpy(outinfo, sinfo, len);
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	SCTP_BUF_LEN(ret) = cmh->cmsg_len;
	return (ret);
}


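/*
 * Same as sctp_build_ctl_nchunk(), but the cmsg is returned in a
 * SCTP_MALLOC'ed buffer instead of an mbuf; *control_len is set to
 * the length of that buffer.
 */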
char *
sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
    int *control_len,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	char *buf;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}
	SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
	if (buf == NULL) {
		/* No space */
		return (buf);
	}
	/* We need a CMSG header followed by the struct */
	cmh = (struct cmsghdr *)buf;
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		memcpy(outinfo, sinfo, len);
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	*control_len = len;
	return (buf);
}

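/*
 * Move a received TSN from the renegable mapping array into the
 * non-renegable (nr) mapping array, so it can never be renegged in a
 * later SACK, and update the highest-TSN trackers for both maps.
 */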
static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		printf("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
		panic("Things are really messed up now!!");
#endif
	}
	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}


/*
 * We are delivering currently from the reassembly queue. We must continue to
 * deliver until we either: 1) run out of space, 2) run out of sequential
 * TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *nchk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;
	struct sctp_queued_to_read *control, *ctl, *nctl;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* The socket above is long gone or going away. */
abandon:
		asoc->fragmented_delivery_inprogress = 0;
		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk);
			/* sa_ignore FREED_MEMORY */
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream,
			 * and not unordered, so we can't deliver it yet.
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end,
			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong, either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is an EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was an EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* pull it off; we delivered it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now let's see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
				/* Deliver more if we can. */
				if (nxt_todel == ctl->sinfo_ssn) {
					TAILQ_REMOVE(&strm->inqueue, ctl, next);
					asoc->size_on_all_streams -= ctl->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					strm->last_sequence_delivered++;
					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					    ctl,
					    &stcb->sctp_socket->so_rcv, 1,
					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
				} else {
					break;
				}
				nxt_todel = strm->last_sequence_delivered + 1;
			}
			break;
		}
	}
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains: what do we do when the socket buffer is FULL?
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *oper;

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream,
	    (uint32_t) strm->last_sequence_delivered,
	    (uint32_t) nxt_todel);
	if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (sizeof(uint32_t) * 3);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
			ippp++;
			*ippp = control->sinfo_tsn;
			ippp++;
			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

		*abort_flag = 1;
		return;
	}
	if (nxt_todel == control->sinfo_ssn) {
		/* can it be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it won't be queued if it could be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;

		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_NOT_LOCKED);
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, he sent me a duplicate str
					 * seq number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSNs have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh, for now just blow
					 * away the chunk!
					 */
					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom) {
						sctp_free_remote_addr(control->whoFrom);
						control->whoFrom = NULL;
					}
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}

/*
 * Returns two things: the total size of the deliverable parts of the first
 * fragmented message on the reassembly queue (via t_size), and 1 if all of
 * the message is ready or 0 if the message is still incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t *t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
	}
	return (0);
}

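/*
 * See whether the head of the reassembly queue can be handed to the
 * socket: either the whole message is present, or enough of it has
 * accumulated to cross the partial delivery point (pd_point).
 */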
static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize, pd_point;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep, the first one is here and it's ok to
			 * deliver, but should we?
			 */
			if (stcb->sctp_socket) {
				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
				    stcb->sctp_ep->partial_delivery_point);
			} else {
				pd_point = stcb->sctp_ep->partial_delivery_point;
			}
			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
				/*
				 * Yes, we set up to start reception by
				 * backing down the TSN, just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery, but it won't know to go
		 * back and call itself again... we do that here with the
		 * goto doit_again
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Finished our fragmented delivery; could there be
			 * more waiting?
			 */
			goto doit_again;
		}
	}
}

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so, pull it off (or as much as
 * we can). If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
	u_char last_flags;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery in progress;
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's not first, no fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);

				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (sizeof(uint32_t) * 3);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either a LAST or
				 * MIDDLE fragment, NOT a FIRST
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (3 * sizeof(uint32_t));
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
					    chk->rec.data.stream_number,
					    asoc->str_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (sizeof(uint32_t) * 3);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
					    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
					    chk->rec.data.stream_seq,
					    asoc->ssn_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, he sent me a duplicate TSN */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSNs have wrapped. Maybe I should
			 * compare to TSN somehow... sigh, for now just blow
			 * away the chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk);
			return;
		} else {
			last_flags = at->rec.data.rcv_flags;
			last_tsn = at->rec.data.TSN_seq;
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok, the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    prev->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR seq
					 * here, they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    prev->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, it's not FIRST and it must be!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok, the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's not a last!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_MIDDLE_FRAG ||
				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be a MIDDLE or FIRST, NOT
				 * a LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    next->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR seq
					 * here, they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    next->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is not
 * stuffing us full of bad packet fragments. A broken peer could also do
 * this, but that is doubtful. It is too bad I must worry about evil
 * crackers; sigh :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok, this guy belongs next to a
					 * guy that is NOT last; it should
					 * be a middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since it's a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than the new chunk, but does
			 * it need to be right before it?
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, it better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}


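/*
 * Process a single DATA chunk: validate the TSN against the mapping
 * arrays, the stream id against streamincnt, and the SSN against what
 * was last delivered, then queue the chunk toward the stream in-queue,
 * the reassembly queue, or the socket buffer. In the paths visible
 * here it returns 0 when the chunk is dropped or otherwise not queued
 * (duplicate, out of range, no room, or bad stream).
 */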
1428 static int
1429 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1430     struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1431     struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1432     int *break_flag, int last_chunk)
1433 {
1434 	/* Process a data chunk */
1435 	/* struct sctp_tmit_chunk *chk; */
1436 	struct sctp_tmit_chunk *chk;
1437 	uint32_t tsn, gap;
1438 	struct mbuf *dmbuf;
1439 	int indx, the_len;
1440 	int need_reasm_check = 0;
1441 	uint16_t strmno, strmseq;
1442 	struct mbuf *oper;
1443 	struct sctp_queued_to_read *control;
1444 	int ordered;
1445 	uint32_t protocol_id;
1446 	uint8_t chunk_flags;
1447 	struct sctp_stream_reset_list *liste;
1448 
1449 	chk = NULL;
1450 	tsn = ntohl(ch->dp.tsn);
1451 	chunk_flags = ch->ch.chunk_flags;
1452 	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1453 		asoc->send_sack = 1;
1454 	}
1455 	protocol_id = ch->dp.protocol_id;
1456 	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1457 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1458 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1459 	}
1460 	if (stcb == NULL) {
1461 		return (0);
1462 	}
1463 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1464 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1465 		/* It is a duplicate */
1466 		SCTP_STAT_INCR(sctps_recvdupdata);
1467 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1468 			/* Record a dup for the next outbound sack */
1469 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1470 			asoc->numduptsns++;
1471 		}
1472 		asoc->send_sack = 1;
1473 		return (0);
1474 	}
1475 	/* Calculate the number of TSN's between the base and this TSN */
1476 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1477 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1478 		/* Can't hold the bit in the mapping at max array, toss it */
1479 		return (0);
1480 	}
1481 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1482 		SCTP_TCB_LOCK_ASSERT(stcb);
1483 		if (sctp_expand_mapping_array(asoc, gap)) {
1484 			/* Can't expand, drop it */
1485 			return (0);
1486 		}
1487 	}
1488 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1489 		*high_tsn = tsn;
1490 	}
1491 	/* See if we have received this one already */
1492 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1493 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1494 		SCTP_STAT_INCR(sctps_recvdupdata);
1495 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1496 			/* Record a dup for the next outbound sack */
1497 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1498 			asoc->numduptsns++;
1499 		}
1500 		asoc->send_sack = 1;
1501 		return (0);
1502 	}
1503 	/*
1504 	 * Check to see about the GONE flag, duplicates would cause a sack
1505 	 * to be sent up above
1506 	 */
1507 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1508 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1509 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1510 	    ) {
1511 		/*
1512 		 * wait a minute, this guy is gone, there is no longer a
1513 		 * receiver. Send peer an ABORT!
1514 		 */
1515 		struct mbuf *op_err;
1516 
1517 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1518 		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
1519 		*abort_flag = 1;
1520 		return (0);
1521 	}
1522 	/*
1523 	 * Now before going further we see if there is room. If NOT then we
1524 	 * MAY let one through only IF this TSN is the one we are waiting
1525 	 * for on a partial delivery API.
1526 	 */
1527 
1528 	/* now do the tests */
1529 	if (((asoc->cnt_on_all_streams +
1530 	    asoc->cnt_on_reasm_queue +
1531 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1532 	    (((int)asoc->my_rwnd) <= 0)) {
1533 		/*
1534 		 * When we have NO room in the rwnd we check to make sure
1535 		 * the reader is doing its job...
1536 		 */
1537 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1538 			/* some to read, wake-up */
1539 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1540 			struct socket *so;
1541 
1542 			so = SCTP_INP_SO(stcb->sctp_ep);
1543 			atomic_add_int(&stcb->asoc.refcnt, 1);
1544 			SCTP_TCB_UNLOCK(stcb);
1545 			SCTP_SOCKET_LOCK(so, 1);
1546 			SCTP_TCB_LOCK(stcb);
1547 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1548 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1549 				/* assoc was freed while we were unlocked */
1550 				SCTP_SOCKET_UNLOCK(so, 1);
1551 				return (0);
1552 			}
1553 #endif
1554 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1555 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1556 			SCTP_SOCKET_UNLOCK(so, 1);
1557 #endif
1558 		}
1559 		/* now is it in the mapping array of what we have accepted? */
1560 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1561 		    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1562 			/* Nope, not in the valid range, dump it */
1563 			sctp_set_rwnd(stcb, asoc);
1564 			if ((asoc->cnt_on_all_streams +
1565 			    asoc->cnt_on_reasm_queue +
1566 			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1567 				SCTP_STAT_INCR(sctps_datadropchklmt);
1568 			} else {
1569 				SCTP_STAT_INCR(sctps_datadroprwnd);
1570 			}
1571 			indx = *break_flag;
1572 			*break_flag = 1;
1573 			return (0);
1574 		}
1575 	}
1576 	strmno = ntohs(ch->dp.stream_id);
1577 	if (strmno >= asoc->streamincnt) {
1578 		struct sctp_paramhdr *phdr;
1579 		struct mbuf *mb;
1580 
1581 		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1582 		    0, M_DONTWAIT, 1, MT_DATA);
1583 		if (mb != NULL) {
1584 			/* add some space up front so prepend will work well */
1585 			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1586 			phdr = mtod(mb, struct sctp_paramhdr *);
1587 			/*
1588 			 * Error causes are just params and this one has two
1589 			 * back-to-back phdrs, one with the error type and
1590 			 * size, the other with the stream id and a rsvd field.
1591 			 */
1592 			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1593 			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1594 			phdr->param_length =
1595 			    htons(sizeof(struct sctp_paramhdr) * 2);
1596 			phdr++;
1597 			/* We insert the stream in the type field */
1598 			phdr->param_type = ch->dp.stream_id;
1599 			/* And set the length to 0 for the rsvd field */
1600 			phdr->param_length = 0;
1601 			sctp_queue_op_err(stcb, mb);
1602 		}
1603 		SCTP_STAT_INCR(sctps_badsid);
1604 		SCTP_TCB_LOCK_ASSERT(stcb);
1605 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1606 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1607 			asoc->highest_tsn_inside_nr_map = tsn;
1608 		}
1609 		if (tsn == (asoc->cumulative_tsn + 1)) {
1610 			/* Update cum-ack */
1611 			asoc->cumulative_tsn = tsn;
1612 		}
1613 		return (0);
1614 	}
1615 	/*
1616 	 * Before we continue let's validate that we are not being fooled by
1617 	 * an evil attacker. We can only have 4k chunks, based on our TSN
1618 	 * spread allowed by the mapping array (512 * 8 bits), so there is no
1619 	 * way our stream sequence numbers could have wrapped. Of course we
1620 	 * only validate the FIRST fragment, so the bit must be set.
1621 	 */
1622 	strmseq = ntohs(ch->dp.stream_sequence);
1623 #ifdef SCTP_ASOCLOG_OF_TSNS
1624 	SCTP_TCB_LOCK_ASSERT(stcb);
1625 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1626 		asoc->tsn_in_at = 0;
1627 		asoc->tsn_in_wrapped = 1;
1628 	}
1629 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1630 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1631 	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1632 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1633 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1634 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1635 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1636 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1637 	asoc->tsn_in_at++;
1638 #endif
1639 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1640 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1641 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1642 	    SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1643 		/* The incoming sseq is behind where we last delivered? */
1644 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1645 		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1646 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1647 		    0, M_DONTWAIT, 1, MT_DATA);
1648 		if (oper) {
1649 			struct sctp_paramhdr *ph;
1650 			uint32_t *ippp;
1651 
1652 			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1653 			    (3 * sizeof(uint32_t));
1654 			ph = mtod(oper, struct sctp_paramhdr *);
1655 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1656 			ph->param_length = htons(SCTP_BUF_LEN(oper));
1657 			ippp = (uint32_t *) (ph + 1);
1658 			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1659 			ippp++;
1660 			*ippp = tsn;
1661 			ippp++;
1662 			*ippp = ((strmno << 16) | strmseq);
1663 
1664 		}
1665 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1666 		sctp_abort_an_association(stcb->sctp_ep, stcb,
1667 		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1668 		*abort_flag = 1;
1669 		return (0);
1670 	}
1671 	/************************************
1672 	 * From here down we may find ch-> invalid
1673 	 * so it's a good idea NOT to use it.
1674 	 *************************************/
1675 
1676 	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1677 	if (last_chunk == 0) {
1678 		dmbuf = SCTP_M_COPYM(*m,
1679 		    (offset + sizeof(struct sctp_data_chunk)),
1680 		    the_len, M_DONTWAIT);
1681 #ifdef SCTP_MBUF_LOGGING
1682 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1683 			struct mbuf *mat;
1684 
1685 			mat = dmbuf;
1686 			while (mat) {
1687 				if (SCTP_BUF_IS_EXTENDED(mat)) {
1688 					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1689 				}
1690 				mat = SCTP_BUF_NEXT(mat);
1691 			}
1692 		}
1693 #endif
1694 	} else {
1695 		/* We can steal the last chunk */
1696 		int l_len;
1697 
1698 		dmbuf = *m;
1699 		/* lop off the top part */
1700 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1701 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1702 			l_len = SCTP_BUF_LEN(dmbuf);
1703 		} else {
1704 			/*
1705 			 * need to count up the size; hopefully we do not
1706 			 * hit this too often :-0
1707 			 */
1708 			struct mbuf *lat;
1709 
1710 			l_len = 0;
1711 			lat = dmbuf;
1712 			while (lat) {
1713 				l_len += SCTP_BUF_LEN(lat);
1714 				lat = SCTP_BUF_NEXT(lat);
1715 			}
1716 		}
1717 		if (l_len > the_len) {
1718 			/* Trim the excess bytes off the end too */
1719 			m_adj(dmbuf, -(l_len - the_len));
1720 		}
1721 	}
1722 	if (dmbuf == NULL) {
1723 		SCTP_STAT_INCR(sctps_nomem);
1724 		return (0);
1725 	}
1726 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1727 	    asoc->fragmented_delivery_inprogress == 0 &&
1728 	    TAILQ_EMPTY(&asoc->resetHead) &&
1729 	    ((ordered == 0) ||
1730 	    ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1731 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1732 		/* Candidate for express delivery */
1733 		/*
1734 		 * It's not fragmented, no PD-API is up, nothing is in the
1735 		 * delivery queue, it's un-ordered OR ordered and the next to
1736 		 * deliver AND nothing else is stuck on the stream queue,
1737 		 * and there is room for it in the socket buffer. Let's just
1738 		 * stuff it up the buffer....
1739 		 */
1740 
1741 		/* It would be nice to avoid this copy if we could :< */
1742 		sctp_alloc_a_readq(stcb, control);
1743 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1744 		    protocol_id,
1745 		    stcb->asoc.context,
1746 		    strmno, strmseq,
1747 		    chunk_flags,
1748 		    dmbuf);
1749 		if (control == NULL) {
1750 			goto failed_express_del;
1751 		}
1752 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1753 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1754 			asoc->highest_tsn_inside_nr_map = tsn;
1755 		}
1756 		sctp_add_to_readq(stcb->sctp_ep, stcb,
1757 		    control, &stcb->sctp_socket->so_rcv,
1758 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1759 
1760 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1761 			/* for ordered, bump what we delivered */
1762 			asoc->strmin[strmno].last_sequence_delivered++;
1763 		}
1764 		SCTP_STAT_INCR(sctps_recvexpress);
1765 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1766 			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1767 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1768 		}
1769 		control = NULL;
1770 
1771 		goto finish_express_del;
1772 	}
1773 failed_express_del:
1774 	/* If we reach here this is a new chunk */
1775 	chk = NULL;
1776 	control = NULL;
1777 	/* Express for fragmented delivery? */
1778 	if ((asoc->fragmented_delivery_inprogress) &&
1779 	    (stcb->asoc.control_pdapi) &&
1780 	    (asoc->str_of_pdapi == strmno) &&
1781 	    (asoc->ssn_of_pdapi == strmseq)
1782 	    ) {
1783 		control = stcb->asoc.control_pdapi;
1784 		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1785 			/* Can't be another FIRST fragment mid-message */
1786 			goto failed_pdapi_express_del;
1787 		}
1788 		if (tsn == (control->sinfo_tsn + 1)) {
1789 			/* Yep, we can add it on */
1790 			int end = 0;
1791 			uint32_t cumack;
1792 
1793 			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1794 				end = 1;
1795 			}
1796 			cumack = asoc->cumulative_tsn;
1797 			if ((cumack + 1) == tsn)
1798 				cumack = tsn;
1799 
1800 			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1801 			    tsn,
1802 			    &stcb->sctp_socket->so_rcv)) {
1803 				SCTP_PRINTF("Append fails end:%d\n", end);
1804 				goto failed_pdapi_express_del;
1805 			}
1806 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1807 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1808 				asoc->highest_tsn_inside_nr_map = tsn;
1809 			}
1810 			SCTP_STAT_INCR(sctps_recvexpressm);
1811 			control->sinfo_tsn = tsn;
1812 			asoc->tsn_last_delivered = tsn;
1813 			asoc->fragment_flags = chunk_flags;
1814 			asoc->tsn_of_pdapi_last_delivered = tsn;
1815 			asoc->last_flags_delivered = chunk_flags;
1816 			asoc->last_strm_seq_delivered = strmseq;
1817 			asoc->last_strm_no_delivered = strmno;
1818 			if (end) {
1819 				/* clean up the flags and such */
1820 				asoc->fragmented_delivery_inprogress = 0;
1821 				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1822 					asoc->strmin[strmno].last_sequence_delivered++;
1823 				}
1824 				stcb->asoc.control_pdapi = NULL;
1825 				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1826 					/*
1827 					 * There could be another message
1828 					 * ready
1829 					 */
1830 					need_reasm_check = 1;
1831 				}
1832 			}
1833 			control = NULL;
1834 			goto finish_express_del;
1835 		}
1836 	}
1837 failed_pdapi_express_del:
1838 	control = NULL;
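	/*
	 * Which map gets the bit decides renegability: with sctp_do_drain
	 * disabled the TSN goes into nr_mapping_array (non-renegable, it
	 * will never be revoked), otherwise into mapping_array so it may
	 * still be reneged if we must drain under memory pressure.
	 */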
1839 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1840 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1841 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1842 			asoc->highest_tsn_inside_nr_map = tsn;
1843 		}
1844 	} else {
1845 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1846 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1847 			asoc->highest_tsn_inside_map = tsn;
1848 		}
1849 	}
1850 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1851 		sctp_alloc_a_chunk(stcb, chk);
1852 		if (chk == NULL) {
1853 			/* No memory so we drop the chunk */
1854 			SCTP_STAT_INCR(sctps_nomem);
1855 			if (last_chunk == 0) {
1856 				/* we copied it, free the copy */
1857 				sctp_m_freem(dmbuf);
1858 			}
1859 			return (0);
1860 		}
1861 		chk->rec.data.TSN_seq = tsn;
1862 		chk->no_fr_allowed = 0;
1863 		chk->rec.data.stream_seq = strmseq;
1864 		chk->rec.data.stream_number = strmno;
1865 		chk->rec.data.payloadtype = protocol_id;
1866 		chk->rec.data.context = stcb->asoc.context;
1867 		chk->rec.data.doing_fast_retransmit = 0;
1868 		chk->rec.data.rcv_flags = chunk_flags;
1869 		chk->asoc = asoc;
1870 		chk->send_size = the_len;
1871 		chk->whoTo = net;
1872 		atomic_add_int(&net->ref_count, 1);
1873 		chk->data = dmbuf;
1874 	} else {
1875 		sctp_alloc_a_readq(stcb, control);
1876 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1877 		    protocol_id,
1878 		    stcb->asoc.context,
1879 		    strmno, strmseq,
1880 		    chunk_flags,
1881 		    dmbuf);
1882 		if (control == NULL) {
1883 			/* No memory so we drop the chunk */
1884 			SCTP_STAT_INCR(sctps_nomem);
1885 			if (last_chunk == 0) {
1886 				/* we copied it, free the copy */
1887 				sctp_m_freem(dmbuf);
1888 			}
1889 			return (0);
1890 		}
1891 		control->length = the_len;
1892 	}
1893 
1894 	/* Mark it as received */
1895 	/* Now queue it where it belongs */
1896 	if (control != NULL) {
1897 		/* First a sanity check */
1898 		if (asoc->fragmented_delivery_inprogress) {
1899 			/*
1900 			 * Ok, we have a fragmented delivery in progress; if
1901 			 * this chunk is the next to deliver OR belongs, in
1902 			 * our view, in the reassembly queue, the peer is
1903 			 * evil or broken.
1904 			 */
1905 			uint32_t estimate_tsn;
1906 
1907 			estimate_tsn = asoc->tsn_last_delivered + 1;
1908 			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1909 			    (estimate_tsn == control->sinfo_tsn)) {
1910 				/* Evil/Broken peer */
1911 				sctp_m_freem(control->data);
1912 				control->data = NULL;
1913 				if (control->whoFrom) {
1914 					sctp_free_remote_addr(control->whoFrom);
1915 					control->whoFrom = NULL;
1916 				}
1917 				sctp_free_a_readq(stcb, control);
1918 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1919 				    0, M_DONTWAIT, 1, MT_DATA);
1920 				if (oper) {
1921 					struct sctp_paramhdr *ph;
1922 					uint32_t *ippp;
1923 
1924 					SCTP_BUF_LEN(oper) =
1925 					    sizeof(struct sctp_paramhdr) +
1926 					    (3 * sizeof(uint32_t));
1927 					ph = mtod(oper, struct sctp_paramhdr *);
1928 					ph->param_type =
1929 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1930 					ph->param_length = htons(SCTP_BUF_LEN(oper));
1931 					ippp = (uint32_t *) (ph + 1);
1932 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1933 					ippp++;
1934 					*ippp = tsn;
1935 					ippp++;
1936 					*ippp = ((strmno << 16) | strmseq);
1937 				}
1938 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1939 				sctp_abort_an_association(stcb->sctp_ep, stcb,
1940 				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1941 
1942 				*abort_flag = 1;
1943 				return (0);
1944 			} else {
1945 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1946 					sctp_m_freem(control->data);
1947 					control->data = NULL;
1948 					if (control->whoFrom) {
1949 						sctp_free_remote_addr(control->whoFrom);
1950 						control->whoFrom = NULL;
1951 					}
1952 					sctp_free_a_readq(stcb, control);
1953 
1954 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1955 					    0, M_DONTWAIT, 1, MT_DATA);
1956 					if (oper) {
1957 						struct sctp_paramhdr *ph;
1958 						uint32_t *ippp;
1959 
1960 						SCTP_BUF_LEN(oper) =
1961 						    sizeof(struct sctp_paramhdr) +
1962 						    (3 * sizeof(uint32_t));
1963 						ph = mtod(oper,
1964 						    struct sctp_paramhdr *);
1965 						ph->param_type =
1966 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1967 						ph->param_length =
1968 						    htons(SCTP_BUF_LEN(oper));
1969 						ippp = (uint32_t *) (ph + 1);
1970 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1971 						ippp++;
1972 						*ippp = tsn;
1973 						ippp++;
1974 						*ippp = ((strmno << 16) | strmseq);
1975 					}
1976 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1977 					sctp_abort_an_association(stcb->sctp_ep,
1978 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1979 
1980 					*abort_flag = 1;
1981 					return (0);
1982 				}
1983 			}
1984 		} else {
1985 			/* No PDAPI running */
1986 			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1987 				/*
1988 				 * Reassembly queue is NOT empty; validate
1989 				 * that this tsn does not need to be in the
1990 				 * reassembly queue. If it does, then our
1991 				 * peer is broken or evil.
1992 				 */
1993 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1994 					sctp_m_freem(control->data);
1995 					control->data = NULL;
1996 					if (control->whoFrom) {
1997 						sctp_free_remote_addr(control->whoFrom);
1998 						control->whoFrom = NULL;
1999 					}
2000 					sctp_free_a_readq(stcb, control);
2001 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2002 					    0, M_DONTWAIT, 1, MT_DATA);
2003 					if (oper) {
2004 						struct sctp_paramhdr *ph;
2005 						uint32_t *ippp;
2006 
2007 						SCTP_BUF_LEN(oper) =
2008 						    sizeof(struct sctp_paramhdr) +
2009 						    (3 * sizeof(uint32_t));
2010 						ph = mtod(oper,
2011 						    struct sctp_paramhdr *);
2012 						ph->param_type =
2013 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2014 						ph->param_length =
2015 						    htons(SCTP_BUF_LEN(oper));
2016 						ippp = (uint32_t *) (ph + 1);
2017 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2018 						ippp++;
2019 						*ippp = tsn;
2020 						ippp++;
2021 						*ippp = ((strmno << 16) | strmseq);
2022 					}
2023 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2024 					sctp_abort_an_association(stcb->sctp_ep,
2025 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2026 
2027 					*abort_flag = 1;
2028 					return (0);
2029 				}
2030 			}
2031 		}
2032 		/* ok, if we reach here we have passed the sanity checks */
2033 		if (chunk_flags & SCTP_DATA_UNORDERED) {
2034 			/* queue directly into socket buffer */
2035 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2036 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2037 			    control,
2038 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2039 		} else {
2040 			/*
2041 			 * Special check for when streams are resetting. We
2042 			 * could be smarter about this and check the actual
2043 			 * stream to see if it is not being reset.. that way
2044 			 * we would not create a HOLB when amongst streams
2045 			 * being reset and those not being reset.
2046 			 *
2047 			 * We take complete messages that have a stream reset
2048 			 * intervening (aka the TSN is after where our
2049 			 * cum-ack needs to be) off and put them on a
2050 			 * pending_reply_queue. The reassembly ones we do
2051 			 * not have to worry about since they are all sorted
2052 			 * and processed by TSN order. It is only the
2053 			 * singletons I must worry about.
2054 			 */
2055 			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2056 			    SCTP_TSN_GT(tsn, liste->tsn)) {
2057 				/*
2058 				 * yep, it's past where we need to reset...
2059 				 * go ahead and queue it.
2060 				 */
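				/*
				 * pending_reply_queue is kept sorted by
				 * sinfo_tsn so the drain after the reset
				 * completes can stop at the first entry
				 * past the reset point.
				 */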
2061 				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2062 					/* first one on */
2063 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2064 				} else {
2065 					struct sctp_queued_to_read *ctlOn,
2066 					                   *nctlOn;
2067 					unsigned char inserted = 0;
2068 
2069 					TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2070 						if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2071 							continue;
2072 						} else {
2073 							/* found it */
2074 							TAILQ_INSERT_BEFORE(ctlOn, control, next);
2075 							inserted = 1;
2076 							break;
2077 						}
2078 					}
2079 					if (inserted == 0) {
2080 						/*
2081 						 * not inserted before any
2082 						 * existing entry, so it
2083 						 * must go at the end.
2084 						 */
2085 						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2086 					}
2087 				}
2088 			} else {
2089 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2090 				if (*abort_flag) {
2091 					return (0);
2092 				}
2093 			}
2094 		}
2095 	} else {
2096 		/* Into the re-assembly queue */
2097 		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2098 		if (*abort_flag) {
2099 			/*
2100 			 * the assoc is now gone and chk was put onto the
2101 			 * reasm queue, which has all been freed.
2102 			 */
2103 			*m = NULL;
2104 			return (0);
2105 		}
2106 	}
2107 finish_express_del:
2108 	if (tsn == (asoc->cumulative_tsn + 1)) {
2109 		/* Update cum-ack */
2110 		asoc->cumulative_tsn = tsn;
2111 	}
2112 	if (last_chunk) {
2113 		*m = NULL;
2114 	}
2115 	if (ordered) {
2116 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2117 	} else {
2118 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2119 	}
2120 	SCTP_STAT_INCR(sctps_recvdata);
2121 	/* Set it present please */
2122 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2123 		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2124 	}
2125 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2126 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2127 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2128 	}
2129 	/* check the special flag for stream resets */
2130 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2131 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2132 		/*
2133 		 * we have finished working through the backlogged TSNs; now
2134 		 * it is time to reset streams. 1: call the reset function.
2135 		 * 2: free the pending_reply space. 3: distribute any chunks
2136 		 * in the pending_reply_queue.
2137 		 */
2138 		struct sctp_queued_to_read *ctl, *nctl;
2139 
2140 		sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2141 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2142 		SCTP_FREE(liste, SCTP_M_STRESET);
2143 		/* sa_ignore FREED_MEMORY */
2144 		liste = TAILQ_FIRST(&asoc->resetHead);
2145 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2146 			/* All can be removed */
2147 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2148 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2149 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2150 				if (*abort_flag) {
2151 					return (0);
2152 				}
2153 			}
2154 		} else {
2155 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2156 				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2157 					break;
2158 				}
2159 				/*
2160 				 * if ctl->sinfo_tsn is <= liste->tsn we can
2161 				 * process it, which is the negation of
2162 				 * ctl->sinfo_tsn > liste->tsn.
2163 				 */
2164 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2165 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2166 				if (*abort_flag) {
2167 					return (0);
2168 				}
2169 			}
2170 		}
2171 		/*
2172 		 * Now service reassembly to pick up anything that has been
2173 		 * held on the reassembly queue.
2174 		 */
2175 		sctp_deliver_reasm_check(stcb, asoc);
2176 		need_reasm_check = 0;
2177 	}
2178 	if (need_reasm_check) {
2179 		/* Another one waits ? */
2180 		sctp_deliver_reasm_check(stcb, asoc);
2181 	}
2182 	return (1);
2183 }
2184 
2185 int8_t sctp_map_lookup_tab[256] = {
2186 	0, 1, 0, 2, 0, 1, 0, 3,
2187 	0, 1, 0, 2, 0, 1, 0, 4,
2188 	0, 1, 0, 2, 0, 1, 0, 3,
2189 	0, 1, 0, 2, 0, 1, 0, 5,
2190 	0, 1, 0, 2, 0, 1, 0, 3,
2191 	0, 1, 0, 2, 0, 1, 0, 4,
2192 	0, 1, 0, 2, 0, 1, 0, 3,
2193 	0, 1, 0, 2, 0, 1, 0, 6,
2194 	0, 1, 0, 2, 0, 1, 0, 3,
2195 	0, 1, 0, 2, 0, 1, 0, 4,
2196 	0, 1, 0, 2, 0, 1, 0, 3,
2197 	0, 1, 0, 2, 0, 1, 0, 5,
2198 	0, 1, 0, 2, 0, 1, 0, 3,
2199 	0, 1, 0, 2, 0, 1, 0, 4,
2200 	0, 1, 0, 2, 0, 1, 0, 3,
2201 	0, 1, 0, 2, 0, 1, 0, 7,
2202 	0, 1, 0, 2, 0, 1, 0, 3,
2203 	0, 1, 0, 2, 0, 1, 0, 4,
2204 	0, 1, 0, 2, 0, 1, 0, 3,
2205 	0, 1, 0, 2, 0, 1, 0, 5,
2206 	0, 1, 0, 2, 0, 1, 0, 3,
2207 	0, 1, 0, 2, 0, 1, 0, 4,
2208 	0, 1, 0, 2, 0, 1, 0, 3,
2209 	0, 1, 0, 2, 0, 1, 0, 6,
2210 	0, 1, 0, 2, 0, 1, 0, 3,
2211 	0, 1, 0, 2, 0, 1, 0, 4,
2212 	0, 1, 0, 2, 0, 1, 0, 3,
2213 	0, 1, 0, 2, 0, 1, 0, 5,
2214 	0, 1, 0, 2, 0, 1, 0, 3,
2215 	0, 1, 0, 2, 0, 1, 0, 4,
2216 	0, 1, 0, 2, 0, 1, 0, 3,
2217 	0, 1, 0, 2, 0, 1, 0, 8
2218 };
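/*
 * sctp_map_lookup_tab[val] is the number of consecutive 1-bits at the low
 * end of val, i.e. the position of the first 0-bit: e.g. tab[0x07] = 3.
 * (The all-ones case, tab[0xff] = 8, is short-circuited in the loop
 * below.) The slide code uses it to locate the first missing TSN within a
 * partially filled map byte.
 */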
2219 
2220 
2221 void
2222 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2223 {
2224 	/*
2225 	 * Now we also need to check the mapping array in a couple of ways.
2226 	 * 1) Did we move the cum-ack point?
2227 	 *
2228 	 * When you first glance at this you might think that all entries that
2229 	 * make up the position of the cum-ack would be in the nr-mapping
2230 	 * array only.. i.e. things up to the cum-ack are always
2231 	 * deliverable. That's true with one exception: when it's a fragmented
2232 	 * message we may not deliver the data until some threshold (or all
2233 	 * of it) is in place. So we must OR the nr_mapping_array and
2234 	 * mapping_array to get a true picture of the cum-ack.
2235 	 */
2236 	struct sctp_association *asoc;
2237 	int at;
2238 	uint8_t val;
2239 	int slide_from, slide_end, lgap, distance;
2240 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2241 
2242 	asoc = &stcb->asoc;
2243 	at = 0;
2244 
2245 	old_cumack = asoc->cumulative_tsn;
2246 	old_base = asoc->mapping_array_base_tsn;
2247 	old_highest = asoc->highest_tsn_inside_map;
2248 	/*
2249 	 * We could probably improve this a small bit by calculating the
2250 	 * offset of the current cum-ack as the starting point.
2251 	 */
2252 	at = 0;
2253 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2254 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2255 		if (val == 0xff) {
2256 			at += 8;
2257 		} else {
2258 			/* there is a 0 bit */
2259 			at += sctp_map_lookup_tab[val];
2260 			break;
2261 		}
2262 	}
2263 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2264 
2265 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2266 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2267 #ifdef INVARIANTS
2268 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2269 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2270 #else
2271 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2272 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2273 		sctp_print_mapping_array(asoc);
2274 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2275 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2276 		}
2277 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2278 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2279 #endif
2280 	}
2281 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2282 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2283 	} else {
2284 		highest_tsn = asoc->highest_tsn_inside_map;
2285 	}
2286 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2287 		/* The complete array was completed by a single FR */
2288 		/* highest becomes the cum-ack */
2289 		int clr;
2290 
2291 #ifdef INVARIANTS
2292 		unsigned int i;
2293 
2294 #endif
2295 
2296 		/* clear the array */
2297 		clr = ((at + 7) >> 3);
2298 		if (clr > asoc->mapping_array_size) {
2299 			clr = asoc->mapping_array_size;
2300 		}
2301 		memset(asoc->mapping_array, 0, clr);
2302 		memset(asoc->nr_mapping_array, 0, clr);
2303 #ifdef INVARIANTS
2304 		for (i = 0; i < asoc->mapping_array_size; i++) {
2305 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2306 				printf("Error: mapping arrays not clean at clear\n");
2307 				sctp_print_mapping_array(asoc);
2308 			}
2309 		}
2310 #endif
2311 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2312 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2313 	} else if (at >= 8) {
2314 		/* we can slide the mapping array down */
2315 		/* slide_from holds where we hit the first NON 0xff byte */
2316 
2317 		/*
2318 		 * now calculate the ceiling of the move using our highest
2319 		 * TSN value
2320 		 */
2321 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2322 		slide_end = (lgap >> 3);
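		/*
		 * slide_end is the byte index holding the highest TSN we
		 * still track, so bytes [slide_from, slide_end] must be
		 * kept; everything below slide_from is fully acked and can
		 * be slid away.
		 */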
2323 		if (slide_end < slide_from) {
2324 			sctp_print_mapping_array(asoc);
2325 #ifdef INVARIANTS
2326 			panic("impossible slide");
2327 #else
2328 			printf("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2329 			    lgap, slide_end, slide_from, at);
2330 			return;
2331 #endif
2332 		}
2333 		if (slide_end > asoc->mapping_array_size) {
2334 #ifdef INVARIANTS
2335 			panic("would overrun buffer");
2336 #else
2337 			printf("Gak, would have overrun map end:%d slide_end:%d\n",
2338 			    asoc->mapping_array_size, slide_end);
2339 			slide_end = asoc->mapping_array_size;
2340 #endif
2341 		}
2342 		distance = (slide_end - slide_from) + 1;
2343 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2344 			sctp_log_map(old_base, old_cumack, old_highest,
2345 			    SCTP_MAP_PREPARE_SLIDE);
2346 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2347 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2348 		}
2349 		if (distance + slide_from > asoc->mapping_array_size ||
2350 		    distance < 0) {
2351 			/*
2352 			 * Here we do NOT slide forward the array so that
2353 			 * hopefully when more data comes in to fill it up
2354 			 * we will be able to slide it forward. Really I
2355 			 * don't think this should happen :-0
2356 			 */
2357 
2358 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2359 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2360 				    (uint32_t) asoc->mapping_array_size,
2361 				    SCTP_MAP_SLIDE_NONE);
2362 			}
2363 		} else {
2364 			int ii;
2365 
2366 			for (ii = 0; ii < distance; ii++) {
2367 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2368 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2369 
2370 			}
2371 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2372 				asoc->mapping_array[ii] = 0;
2373 				asoc->nr_mapping_array[ii] = 0;
2374 			}
2375 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2376 				asoc->highest_tsn_inside_map += (slide_from << 3);
2377 			}
2378 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2379 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2380 			}
2381 			asoc->mapping_array_base_tsn += (slide_from << 3);
2382 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2383 				sctp_log_map(asoc->mapping_array_base_tsn,
2384 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2385 				    SCTP_MAP_SLIDE_RESULT);
2386 			}
2387 		}
2388 	}
2389 }
2390 
2391 void
2392 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap, int *abort_flag)
2393 {
2394 	struct sctp_association *asoc;
2395 	uint32_t highest_tsn;
2396 
2397 	asoc = &stcb->asoc;
2398 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2399 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2400 	} else {
2401 		highest_tsn = asoc->highest_tsn_inside_map;
2402 	}
2403 
2404 	/*
2405 	 * Now we need to see if we need to queue a sack or just start the
2406 	 * timer (if allowed).
2407 	 */
2408 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2409 		/*
2410 		 * Ok, special case: in the SHUTDOWN-SENT state, we make
2411 		 * sure the SACK timer is off and instead send a SHUTDOWN
2412 		 * and a SACK.
2413 		 */
2414 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2415 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2416 			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2417 		}
2418 		sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2419 		sctp_send_sack(stcb);
2420 	} else {
2421 		int is_a_gap;
2422 
2423 		/* is there a gap now ? */
2424 		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2425 
2426 		/*
2427 		 * CMT DAC algorithm: increase number of packets received
2428 		 * since last ack
2429 		 */
2430 		stcb->asoc.cmt_dac_pkts_rcvd++;
2431 
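		/*
		 * Send a SACK right away if one is explicitly pending, a
		 * gap just closed or is still open, we hold dup-TSNs,
		 * delayed ack is disabled, or the packet-count limit was
		 * hit; otherwise (re)arm the delayed-ack timer. With CMT
		 * DAC, gap-only triggers may still be delayed (see below).
		 */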
2432 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2433 							 * SACK */
2434 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2435 							 * longer is one */
2436 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2437 		    (is_a_gap) ||	/* is still a gap */
2438 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2439 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2440 		    ) {
2441 
2442 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2443 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2444 			    (stcb->asoc.send_sack == 0) &&
2445 			    (stcb->asoc.numduptsns == 0) &&
2446 			    (stcb->asoc.delayed_ack) &&
2447 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2448 
2449 				/*
2450 				 * CMT DAC algorithm: With CMT, delay acks
2451 				 * even in the face of reordering.
2452 				 *
2453 				 * Therefore, acks that do not have to be
2454 				 * sent because of the above reasons will be
2455 				 * delayed. That is, acks that would have
2456 				 * been sent due to gap reports will be
2457 				 * delayed with DAC. Start the delayed ack
2458 				 * timer.
2459 				 */
2460 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2461 				    stcb->sctp_ep, stcb, NULL);
2462 			} else {
2463 				/*
2464 				 * Ok we must build a SACK since the timer
2465 				 * is pending, we got our first packet OR
2466 				 * there are gaps or duplicates.
2467 				 */
2468 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2469 				sctp_send_sack(stcb);
2470 			}
2471 		} else {
2472 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2473 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2474 				    stcb->sctp_ep, stcb, NULL);
2475 			}
2476 		}
2477 	}
2478 }
2479 
2480 void
2481 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2482 {
2483 	struct sctp_tmit_chunk *chk;
2484 	uint32_t tsize, pd_point;
2485 	uint16_t nxt_todel;
2486 
2487 	if (asoc->fragmented_delivery_inprogress) {
2488 		sctp_service_reassembly(stcb, asoc);
2489 	}
2490 	/* Can we proceed further, i.e. the PD-API is complete */
2491 	if (asoc->fragmented_delivery_inprogress) {
2492 		/* no */
2493 		return;
2494 	}
2495 	/*
2496 	 * Now is there some other chunk I can deliver from the reassembly
2497 	 * queue.
2498 	 */
2499 doit_again:
2500 	chk = TAILQ_FIRST(&asoc->reasmqueue);
2501 	if (chk == NULL) {
2502 		asoc->size_on_reasm_queue = 0;
2503 		asoc->cnt_on_reasm_queue = 0;
2504 		return;
2505 	}
2506 	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2507 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2508 	    ((nxt_todel == chk->rec.data.stream_seq) ||
2509 	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2510 		/*
2511 		 * Yep, the first one is here. We set up to start reception
2512 		 * by backing down the TSN just in case we can't deliver.
2513 		 */
2514 
2515 		/*
2516 		 * Before we start, though, either all of the message should
2517 		 * be here, or enough of it to reach the partial delivery
2518 		 * point, so that something can be delivered.
2519 		 */
2520 		if (stcb->sctp_socket) {
2521 			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
2522 			    stcb->sctp_ep->partial_delivery_point);
2523 		} else {
2524 			pd_point = stcb->sctp_ep->partial_delivery_point;
2525 		}
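		/*
		 * pd_point is the partial-delivery threshold: we deliver
		 * once the whole message is queued, or once at least
		 * pd_point bytes of it are, capped by the socket receive
		 * buffer limit.
		 */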
2526 		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2527 			asoc->fragmented_delivery_inprogress = 1;
2528 			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2529 			asoc->str_of_pdapi = chk->rec.data.stream_number;
2530 			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2531 			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2532 			asoc->fragment_flags = chk->rec.data.rcv_flags;
2533 			sctp_service_reassembly(stcb, asoc);
2534 			if (asoc->fragmented_delivery_inprogress == 0) {
2535 				goto doit_again;
2536 			}
2537 		}
2538 	}
2539 }
2540 
2541 int
2542 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2543     struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2544     struct sctp_nets *net, uint32_t * high_tsn)
2545 {
2546 	struct sctp_data_chunk *ch, chunk_buf;
2547 	struct sctp_association *asoc;
2548 	int num_chunks = 0;	/* number of data chunks processed */
2549 	int stop_proc = 0;
2550 	int chk_length, break_flag, last_chunk;
2551 	int abort_flag = 0, was_a_gap;
2552 	struct mbuf *m;
2553 	uint32_t highest_tsn;
2554 
2555 	/* set the rwnd */
2556 	sctp_set_rwnd(stcb, &stcb->asoc);
2557 
2558 	m = *mm;
2559 	SCTP_TCB_LOCK_ASSERT(stcb);
2560 	asoc = &stcb->asoc;
2561 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2562 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2563 	} else {
2564 		highest_tsn = asoc->highest_tsn_inside_map;
2565 	}
2566 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
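	/*
	 * Remember whether a gap existed on entry; sctp_sack_check() uses
	 * this to force an immediate SACK when this packet closes the gap.
	 */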
2567 	/*
2568 	 * setup where we got the last DATA packet from for any SACK that
2569 	 * may need to go out. Don't bump the net. This is done ONLY when a
2570 	 * chunk is assigned.
2571 	 */
2572 	asoc->last_data_chunk_from = net;
2573 
2574 	/*-
2575 	 * Now before we proceed we must figure out if this is a wasted
2576 	 * cluster... i.e. it is a small packet sent in and yet the driver
2577 	 * underneath allocated a full cluster for it. If so we must copy it
2578 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2579 	 * with cluster starvation. Note for __Panda__ we don't do this
2580 	 * since it has clusters all the way down to 64 bytes.
2581 	 */
2582 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2583 		/* we only handle mbufs that are singletons.. not chains */
2584 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2585 		if (m) {
2586 			/* ok let's see if we can copy the data up */
2587 			caddr_t *from, *to;
2588 
2589 			/* get the pointers and copy */
2590 			to = mtod(m, caddr_t *);
2591 			from = mtod((*mm), caddr_t *);
2592 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2593 			/* copy the length and free up the old */
2594 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2595 			sctp_m_freem(*mm);
2596 			/* success, copy back */
2597 			*mm = m;
2598 		} else {
2599 			/* We are in trouble in the mbuf world .. yikes */
2600 			m = *mm;
2601 		}
2602 	}
2603 	/* get pointer to the first chunk header */
2604 	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2605 	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2606 	if (ch == NULL) {
2607 		return (1);
2608 	}
2609 	/*
2610 	 * process all DATA chunks...
2611 	 */
2612 	*high_tsn = asoc->cumulative_tsn;
2613 	break_flag = 0;
2614 	asoc->data_pkts_seen++;
2615 	while (stop_proc == 0) {
2616 		/* validate chunk length */
2617 		chk_length = ntohs(ch->ch.chunk_length);
2618 		if (length - *offset < chk_length) {
2619 			/* all done, mutilated chunk */
2620 			stop_proc = 1;
2621 			break;
2622 		}
2623 		if (ch->ch.chunk_type == SCTP_DATA) {
2624 			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2625 				/*
2626 				 * Need to send an abort since we had an
2627 				 * invalid data chunk.
2628 				 */
2629 				struct mbuf *op_err;
2630 
2631 				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2632 				    0, M_DONTWAIT, 1, MT_DATA);
2633 
2634 				if (op_err) {
2635 					struct sctp_paramhdr *ph;
2636 					uint32_t *ippp;
2637 
2638 					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2639 					    (2 * sizeof(uint32_t));
2640 					ph = mtod(op_err, struct sctp_paramhdr *);
2641 					ph->param_type =
2642 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2643 					ph->param_length = htons(SCTP_BUF_LEN(op_err));
2644 					ippp = (uint32_t *) (ph + 1);
2645 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2646 					ippp++;
2647 					*ippp = asoc->cumulative_tsn;
2648 
2649 				}
2650 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2651 				sctp_abort_association(inp, stcb, m, iphlen, sh,
2652 				    op_err, 0, net->port);
2653 				return (2);
2654 			}
2655 #ifdef SCTP_AUDITING_ENABLED
2656 			sctp_audit_log(0xB1, 0);
2657 #endif
2658 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2659 				last_chunk = 1;
2660 			} else {
2661 				last_chunk = 0;
2662 			}
2663 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2664 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2665 			    last_chunk)) {
2666 				num_chunks++;
2667 			}
2668 			if (abort_flag)
2669 				return (2);
2670 
2671 			if (break_flag) {
2672 				/*
2673 				 * Set because of out of rwnd space and no
2674 				 * drop rep space left.
2675 				 */
2676 				stop_proc = 1;
2677 				break;
2678 			}
2679 		} else {
2680 			/* not a data chunk in the data region */
2681 			switch (ch->ch.chunk_type) {
2682 			case SCTP_INITIATION:
2683 			case SCTP_INITIATION_ACK:
2684 			case SCTP_SELECTIVE_ACK:
2685 			case SCTP_NR_SELECTIVE_ACK:	/* EY */
2686 			case SCTP_HEARTBEAT_REQUEST:
2687 			case SCTP_HEARTBEAT_ACK:
2688 			case SCTP_ABORT_ASSOCIATION:
2689 			case SCTP_SHUTDOWN:
2690 			case SCTP_SHUTDOWN_ACK:
2691 			case SCTP_OPERATION_ERROR:
2692 			case SCTP_COOKIE_ECHO:
2693 			case SCTP_COOKIE_ACK:
2694 			case SCTP_ECN_ECHO:
2695 			case SCTP_ECN_CWR:
2696 			case SCTP_SHUTDOWN_COMPLETE:
2697 			case SCTP_AUTHENTICATION:
2698 			case SCTP_ASCONF_ACK:
2699 			case SCTP_PACKET_DROPPED:
2700 			case SCTP_STREAM_RESET:
2701 			case SCTP_FORWARD_CUM_TSN:
2702 			case SCTP_ASCONF:
2703 				/*
2704 				 * Now, what do we do with KNOWN chunks that
2705 				 * are NOT in the right place?
2706 				 *
2707 				 * For now, I do nothing but ignore them. We
2708 				 * may later want to add sysctl stuff to
2709 				 * switch out and do either an ABORT() or
2710 				 * possibly process them.
2711 				 */
2712 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2713 					struct mbuf *op_err;
2714 
2715 					op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2716 					sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
2717 					return (2);
2718 				}
2719 				break;
2720 			default:
2721 				/* unknown chunk type, use bit rules */
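				/*
				 * Per RFC 4960, the two high bits of the
				 * chunk type encode the handling of unknown
				 * chunks: 0x40 set means report it in an
				 * ERROR, 0x80 set means skip it and keep
				 * processing, clear means discard the rest
				 * of the packet.
				 */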
2722 				if (ch->ch.chunk_type & 0x40) {
2723 					/* Add a error report to the queue */
2724 					struct mbuf *merr;
2725 					struct sctp_paramhdr *phd;
2726 
2727 					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2728 					if (merr) {
2729 						phd = mtod(merr, struct sctp_paramhdr *);
2730 						/*
2731 						 * We cheat and use param
2732 						 * type since we did not
2733 						 * bother to define an error
2734 						 * cause struct. They are
2735 						 * the same basic format
2736 						 * with different names.
2737 						 */
2738 						phd->param_type =
2739 						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
2740 						phd->param_length =
2741 						    htons(chk_length + sizeof(*phd));
2742 						SCTP_BUF_LEN(merr) = sizeof(*phd);
2743 						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
2744 						    SCTP_SIZE32(chk_length),
2745 						    M_DONTWAIT);
2746 						if (SCTP_BUF_NEXT(merr)) {
2747 							sctp_queue_op_err(stcb, merr);
2748 						} else {
2749 							sctp_m_freem(merr);
2750 						}
2751 					}
2752 				}
2753 				if ((ch->ch.chunk_type & 0x80) == 0) {
2754 					/* discard the rest of this packet */
2755 					stop_proc = 1;
2756 				}	/* else skip this bad chunk and
2757 					 * continue... */
2758 				break;
2759 			}	/* end switch on chunk type */
2760 		}
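		/*
		 * Advance past this chunk; SCTP_SIZE32() rounds the length
		 * up to the 4-byte boundary chunks are padded to.
		 */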
2761 		*offset += SCTP_SIZE32(chk_length);
2762 		if ((*offset >= length) || stop_proc) {
2763 			/* no more data left in the mbuf chain */
2764 			stop_proc = 1;
2765 			continue;
2766 		}
2767 		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2768 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2769 		if (ch == NULL) {
2770 			*offset = length;
2771 			stop_proc = 1;
2772 			break;
2773 
2774 		}
2775 	}			/* while */
2776 	if (break_flag) {
2777 		/*
2778 		 * we need to report rwnd overrun drops.
2779 		 */
2780 		sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2781 	}
2782 	if (num_chunks) {
2783 		/*
2784 		 * Did we get data? If so, update the time for auto-close
2785 		 * and give the peer credit for being alive.
2786 		 */
2787 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2788 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2789 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2790 			    stcb->asoc.overall_error_count,
2791 			    0,
2792 			    SCTP_FROM_SCTP_INDATA,
2793 			    __LINE__);
2794 		}
2795 		stcb->asoc.overall_error_count = 0;
2796 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2797 	}
2798 	/* now service all of the reassm queue if needed */
2799 	/* now service all of the reassembly queue if needed */
2800 		sctp_service_queues(stcb, asoc);
2801 
2802 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2803 		/* Assure that we ack right away */
2804 		stcb->asoc.send_sack = 1;
2805 	}
2806 	/* Start a sack timer or QUEUE a SACK for sending */
2807 	sctp_sack_check(stcb, was_a_gap, &abort_flag);
2808 	if (abort_flag)
2809 		return (2);
2810 
2811 	return (0);
2812 }
2813 
2814 static int
2815 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2816     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2817     int *num_frs,
2818     uint32_t * biggest_newly_acked_tsn,
2819     uint32_t * this_sack_lowest_newack,
2820     int *ecn_seg_sums)
2821 {
2822 	struct sctp_tmit_chunk *tp1;
2823 	unsigned int theTSN;
2824 	int j, wake_him = 0, circled = 0;
2825 
2826 	/* Recover the tp1 we last saw */
2827 	tp1 = *p_tp1;
2828 	if (tp1 == NULL) {
2829 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2830 	}
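	/*
	 * A gap ack block carries TSN offsets relative to the SACK's
	 * cumulative TSN (last_tsn), so this walk acks every TSN in
	 * [last_tsn + frag_strt, last_tsn + frag_end].
	 */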
2831 	for (j = frag_strt; j <= frag_end; j++) {
2832 		theTSN = j + last_tsn;
2833 		while (tp1) {
2834 			if (tp1->rec.data.doing_fast_retransmit)
2835 				(*num_frs) += 1;
2836 
2837 			/*-
2838 			 * CMT: CUCv2 algorithm. For each TSN being
2839 			 * processed from the sent queue, track the
2840 			 * next expected pseudo-cumack, or
2841 			 * rtx_pseudo_cumack, if required. Separate
2842 			 * cumack trackers for first transmissions,
2843 			 * and retransmissions.
2844 			 */
2845 			if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2846 			    (tp1->snd_count == 1)) {
2847 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2848 				tp1->whoTo->find_pseudo_cumack = 0;
2849 			}
2850 			if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2851 			    (tp1->snd_count > 1)) {
2852 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2853 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2854 			}
2855 			if (tp1->rec.data.TSN_seq == theTSN) {
2856 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2857 					/*-
2858 					 * must be held until
2859 					 * cum-ack passes
2860 					 */
2861 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2862 						/*-
2863 						 * If it is less than RESEND, it is
2864 						 * now no-longer in flight.
2865 						 * Higher values may already be set
2866 						 * via previous Gap Ack Blocks...
2867 						 * i.e. ACKED or RESEND.
2868 						 */
2869 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2870 						    *biggest_newly_acked_tsn)) {
2871 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2872 						}
2873 						/*-
2874 						 * CMT: SFR algo (and HTNA) - set
2875 						 * saw_newack to 1 for dest being
2876 						 * newly acked. update
2877 						 * this_sack_highest_newack if
2878 						 * appropriate.
2879 						 */
2880 						if (tp1->rec.data.chunk_was_revoked == 0)
2881 							tp1->whoTo->saw_newack = 1;
2882 
2883 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2884 						    tp1->whoTo->this_sack_highest_newack)) {
2885 							tp1->whoTo->this_sack_highest_newack =
2886 							    tp1->rec.data.TSN_seq;
2887 						}
2888 						/*-
2889 						 * CMT DAC algo: also update
2890 						 * this_sack_lowest_newack
2891 						 */
2892 						if (*this_sack_lowest_newack == 0) {
2893 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2894 								sctp_log_sack(*this_sack_lowest_newack,
2895 								    last_tsn,
2896 								    tp1->rec.data.TSN_seq,
2897 								    0,
2898 								    0,
2899 								    SCTP_LOG_TSN_ACKED);
2900 							}
2901 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2902 						}
2903 						/*-
2904 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2905 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2906 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2907 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2908 						 * Separate pseudo_cumack trackers for first transmissions and
2909 						 * retransmissions.
2910 						 */
2911 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2912 							if (tp1->rec.data.chunk_was_revoked == 0) {
2913 								tp1->whoTo->new_pseudo_cumack = 1;
2914 							}
2915 							tp1->whoTo->find_pseudo_cumack = 1;
2916 						}
2917 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2918 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2919 						}
2920 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2921 							if (tp1->rec.data.chunk_was_revoked == 0) {
2922 								tp1->whoTo->new_pseudo_cumack = 1;
2923 							}
2924 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2925 						}
2926 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2927 							sctp_log_sack(*biggest_newly_acked_tsn,
2928 							    last_tsn,
2929 							    tp1->rec.data.TSN_seq,
2930 							    frag_strt,
2931 							    frag_end,
2932 							    SCTP_LOG_TSN_ACKED);
2933 						}
2934 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2935 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2936 							    tp1->whoTo->flight_size,
2937 							    tp1->book_size,
2938 							    (uintptr_t) tp1->whoTo,
2939 							    tp1->rec.data.TSN_seq);
2940 						}
2941 						sctp_flight_size_decrease(tp1);
2942 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2943 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2944 							    tp1);
2945 						}
2946 						sctp_total_flight_decrease(stcb, tp1);
2947 
2948 						tp1->whoTo->net_ack += tp1->send_size;
2949 						if (tp1->snd_count < 2) {
2950 							/*-
2951 							 * True non-retransmitted chunk
2952 							 */
2953 							tp1->whoTo->net_ack2 += tp1->send_size;
2954 
2955 							/*-
2956 							 * update RTO too?
2957 							 */
2958 							if (tp1->do_rtt) {
2959 								tp1->whoTo->RTO =
2960 								    sctp_calculate_rto(stcb,
2961 								    &stcb->asoc,
2962 								    tp1->whoTo,
2963 								    &tp1->sent_rcv_time,
2964 								    sctp_align_safe_nocopy,
2965 								    SCTP_DETERMINE_LL_OK);
2966 								tp1->do_rtt = 0;
2967 							}
2968 						}
2969 					}
2970 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2971 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2972 						    stcb->asoc.this_sack_highest_gap)) {
2973 							stcb->asoc.this_sack_highest_gap =
2974 							    tp1->rec.data.TSN_seq;
2975 						}
2976 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2977 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2978 #ifdef SCTP_AUDITING_ENABLED
2979 							sctp_audit_log(0xB2,
2980 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2981 #endif
2982 						}
2983 					}
2984 					/*-
2985 					 * All chunks NOT UNSENT fall through here and are marked
2986 					 * (leave PR-SCTP ones that are to skip alone though)
2987 					 */
2988 					if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
2989 						tp1->sent = SCTP_DATAGRAM_MARKED;
2990 
2991 					if (tp1->rec.data.chunk_was_revoked) {
2992 						/* deflate the cwnd */
2993 						tp1->whoTo->cwnd -= tp1->book_size;
2994 						tp1->rec.data.chunk_was_revoked = 0;
2995 					}
2996 					/* NR Sack code here */
2997 					if (nr_sacking) {
2998 						if (tp1->data) {
2999 							/*
3000 							 * sa_ignore
3001 							 * NO_NULL_CHK
3002 							 */
3003 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3004 							sctp_m_freem(tp1->data);
3005 							tp1->data = NULL;
3006 						}
3007 						wake_him++;
3008 					}
3009 				}
3010 				break;
3011 			}	/* if (tp1->TSN_seq == theTSN) */
3012 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
3013 				break;
3014 			}
3015 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3016 			if ((tp1 == NULL) && (circled == 0)) {
3017 				circled++;
3018 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3019 			}
3020 		}		/* end while (tp1) */
3021 		if (tp1 == NULL) {
3022 			circled = 0;
3023 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3024 		}
3025 		/* In case the fragments were not in order we must reset */
3026 	}			/* end for (j = fragStart */
3027 	*p_tp1 = tp1;
3028 	return (wake_him);	/* Return value only used for nr-sack */
3029 }
3030 
3031 
3032 static int
3033 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3034     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3035     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3036     int num_seg, int num_nr_seg, int *ecn_seg_sums)
3037 {
3038 	struct sctp_gap_ack_block *frag, block;
3039 	struct sctp_tmit_chunk *tp1;
3040 	int i;
3041 	int num_frs = 0;
3042 	int chunk_freed;
3043 	int non_revocable;
3044 	uint16_t frag_strt, frag_end, prev_frag_end;
3045 
3046 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3047 	prev_frag_end = 0;
3048 	chunk_freed = 0;
3049 
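	/*
	 * The num_seg revocable gap reports come first, followed by the
	 * num_nr_seg non-revocable (NR-SACK) ones; when the walk crosses
	 * that boundary we restart from the head of the sent queue since
	 * the NR blocks begin their own ascending sequence.
	 */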
3050 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3051 		if (i == num_seg) {
3052 			prev_frag_end = 0;
3053 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3054 		}
3055 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3056 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3057 		*offset += sizeof(block);
3058 		if (frag == NULL) {
3059 			return (chunk_freed);
3060 		}
3061 		frag_strt = ntohs(frag->start);
3062 		frag_end = ntohs(frag->end);
3063 
3064 		if (frag_strt > frag_end) {
3065 			/* This gap report is malformed, skip it. */
3066 			continue;
3067 		}
3068 		if (frag_strt <= prev_frag_end) {
3069 			/* This gap report is not in order, so restart. */
3070 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3071 		}
3072 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3073 			*biggest_tsn_acked = last_tsn + frag_end;
3074 		}
3075 		if (i < num_seg) {
3076 			non_revocable = 0;
3077 		} else {
3078 			non_revocable = 1;
3079 		}
3080 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3081 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3082 		    this_sack_lowest_newack, ecn_seg_sums)) {
3083 			chunk_freed = 1;
3084 		}
3085 		prev_frag_end = frag_end;
3086 	}
3087 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3088 		if (num_frs)
3089 			sctp_log_fr(*biggest_tsn_acked,
3090 			    *biggest_newly_acked_tsn,
3091 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3092 	}
3093 	return (chunk_freed);
3094 }
3095 
3096 static void
3097 sctp_check_for_revoked(struct sctp_tcb *stcb,
3098     struct sctp_association *asoc, uint32_t cumack,
3099     uint32_t biggest_tsn_acked)
3100 {
3101 	struct sctp_tmit_chunk *tp1;
3102 	int tot_revoked = 0;
3103 
3104 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3105 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3106 			/*
3107 			 * ok, this guy is either ACKED or MARKED. If it is
3108 			 * ACKED it has been previously acked but not this
3109 			 * time, i.e. revoked. If it is MARKED it was ACK'ed
3110 			 * again.
3111 			 */
3112 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3113 				break;
3114 			}
3115 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3116 				/* it has been revoked */
3117 				tp1->sent = SCTP_DATAGRAM_SENT;
3118 				tp1->rec.data.chunk_was_revoked = 1;
3119 				/*
3120 				 * We must add this stuff back in to assure
3121 				 * timers and such get started.
3122 				 */
3123 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3124 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3125 					    tp1->whoTo->flight_size,
3126 					    tp1->book_size,
3127 					    (uintptr_t) tp1->whoTo,
3128 					    tp1->rec.data.TSN_seq);
3129 				}
3130 				sctp_flight_size_increase(tp1);
3131 				sctp_total_flight_increase(stcb, tp1);
3132 				/*
3133 				 * We inflate the cwnd to compensate for our
3134 				 * artificial inflation of the flight_size.
3135 				 */
3136 				tp1->whoTo->cwnd += tp1->book_size;
3137 				tot_revoked++;
3138 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3139 					sctp_log_sack(asoc->last_acked_seq,
3140 					    cumack,
3141 					    tp1->rec.data.TSN_seq,
3142 					    0,
3143 					    0,
3144 					    SCTP_LOG_TSN_REVOKED);
3145 				}
3146 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3147 				/* it has been re-acked in this SACK */
3148 				tp1->sent = SCTP_DATAGRAM_ACKED;
3149 			}
3150 		}
3151 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3152 			break;
3153 	}
3154 }
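
/*
 * Example of the revocation handled above (hypothetical TSNs): a
 * previous SACK gap-acked TSN 1004, marking it SCTP_DATAGRAM_ACKED. A
 * later SACK with cum-ack 1000 that no longer covers 1004 means the
 * peer reneged: the chunk goes back to SCTP_DATAGRAM_SENT, is added
 * back into the flight size, and the cwnd is inflated by book_size to
 * compensate for the artificial flight-size increase.
 */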
3155 
3156 
3157 static void
3158 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3159     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3160 {
3161 	struct sctp_tmit_chunk *tp1;
3162 	int strike_flag = 0;
3163 	struct timeval now;
3164 	int tot_retrans = 0;
3165 	uint32_t sending_seq;
3166 	struct sctp_nets *net;
3167 	int num_dests_sacked = 0;
3168 
3169 	/*
3170 	 * Select the sending_seq: this is either the next thing ready to be
3171 	 * sent but not yet transmitted, or the next seq we will assign.
3172 	 */
3173 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3174 	if (tp1 == NULL) {
3175 		sending_seq = asoc->sending_seq;
3176 	} else {
3177 		sending_seq = tp1->rec.data.TSN_seq;
3178 	}
3179 
3180 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3181 	if ((asoc->sctp_cmt_on_off > 0) &&
3182 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3183 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3184 			if (net->saw_newack)
3185 				num_dests_sacked++;
3186 		}
3187 	}
3188 	if (stcb->asoc.peer_supports_prsctp) {
3189 		(void)SCTP_GETTIME_TIMEVAL(&now);
3190 	}
3191 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3192 		strike_flag = 0;
3193 		if (tp1->no_fr_allowed) {
3194 			/* this one had a timeout or something */
3195 			continue;
3196 		}
3197 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3198 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3199 				sctp_log_fr(biggest_tsn_newly_acked,
3200 				    tp1->rec.data.TSN_seq,
3201 				    tp1->sent,
3202 				    SCTP_FR_LOG_CHECK_STRIKE);
3203 		}
3204 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3205 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3206 			/* done */
3207 			break;
3208 		}
3209 		if (stcb->asoc.peer_supports_prsctp) {
3210 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3211 				/* Is it expired? */
3212 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3213 					/* Yes so drop it */
3214 					if (tp1->data != NULL) {
3215 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3216 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3217 						    SCTP_SO_NOT_LOCKED);
3218 					}
3219 					continue;
3220 				}
3221 			}
3222 		}
3223 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3224 			/* we are beyond the tsn in the sack  */
3225 			break;
3226 		}
3227 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3228 			/* either a RESEND, ACKED, or MARKED */
3229 			/* skip */
3230 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3231 				/* Continue strikin FWD-TSN chunks */
3232 				/* Continue striking FWD-TSN chunks */
3233 			}
3234 			continue;
3235 		}
3236 		/*
3237 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3238 		 */
3239 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3240 			/*
3241 			 * No new acks were received for data sent to this
3242 			 * dest. Therefore, according to the SFR algo for
3243 			 * CMT, no data sent to this dest can be marked for
3244 			 * FR using this SACK.
3245 			 */
3246 			continue;
3247 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3248 		    tp1->whoTo->this_sack_highest_newack)) {
3249 			/*
3250 			 * CMT: New acks were received for data sent to
3251 			 * this dest. But no new acks were seen for data
3252 			 * sent after tp1. Therefore, according to the SFR
3253 			 * algo for CMT, tp1 cannot be marked for FR using
3254 			 * this SACK. This step covers part of the DAC algo
3255 			 * and the HTNA algo as well.
3256 			 */
3257 			continue;
3258 		}
3259 		/*
3260 		 * Here we check to see if we have already done a FR
3261 		 * and if so we see if the biggest TSN we saw in the sack is
3262 		 * smaller than the recovery point. If so we don't strike
3263 		 * the tsn... otherwise we CAN strike the TSN.
3264 		 */
3265 		/*
3266 		 * @@@ JRI: Check for CMT if (accum_moved &&
3267 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3268 		 * 0)) {
3269 		 */
3270 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3271 			/*
3272 			 * Strike the TSN if in fast-recovery and cum-ack
3273 			 * moved.
3274 			 */
3275 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3276 				sctp_log_fr(biggest_tsn_newly_acked,
3277 				    tp1->rec.data.TSN_seq,
3278 				    tp1->sent,
3279 				    SCTP_FR_LOG_STRIKE_CHUNK);
3280 			}
3281 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3282 				tp1->sent++;
3283 			}
3284 			if ((asoc->sctp_cmt_on_off > 0) &&
3285 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3286 				/*
3287 				 * CMT DAC algorithm: If SACK flag is set to
3288 				 * 0, then lowest_newack test will not pass
3289 				 * because it would have been set to the
3290 				 * cumack earlier. If the chunk is not already
3291 				 * marked for rtx, the SACK is not a mixed SACK,
3292 				 * and tp1 is not between two sacked TSNs, mark by
3293 				 * one more. NOTE that we are marking by one
3294 				 * additional time since the SACK DAC flag
3295 				 * indicates that two packets have been
3296 				 * received after this missing TSN.
3297 				 */
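				/*
				 * Hypothetical illustration: with the DAC
				 * flag clear and num_dests_sacked == 1, a
				 * missing TSN below this_sack_lowest_newack
				 * is struck twice by a single SACK (once
				 * above, once here), moving it toward
				 * SCTP_DATAGRAM_RESEND in fewer SACKs.
				 */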
3298 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3299 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3300 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3301 						sctp_log_fr(16 + num_dests_sacked,
3302 						    tp1->rec.data.TSN_seq,
3303 						    tp1->sent,
3304 						    SCTP_FR_LOG_STRIKE_CHUNK);
3305 					}
3306 					tp1->sent++;
3307 				}
3308 			}
3309 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3310 		    (asoc->sctp_cmt_on_off == 0)) {
3311 			/*
3312 			 * For those that have done a FR we must take
3313 			 * special consideration if we strike. I.e. the
3314 			 * biggest_newly_acked must be higher than the
3315 			 * sending_seq at the time we did the FR.
3316 			 */
3317 			if (
3318 #ifdef SCTP_FR_TO_ALTERNATE
3319 			/*
3320 			 * If FR's go to new networks, then we must only do
3321 			 * this for singly homed asoc's. However if the FR's
3322 			 * go to the same network (Armando's work) then it is
3323 			 * ok to FR multiple times.
3324 			 */
3325 			    (asoc->numnets < 2)
3326 #else
3327 			    (1)
3328 #endif
3329 			    ) {
3330 
3331 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3332 				    tp1->rec.data.fast_retran_tsn)) {
3333 					/*
3334 					 * Strike the TSN, since this ack is
3335 					 * beyond where things were when we
3336 					 * did a FR.
3337 					 */
3338 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3339 						sctp_log_fr(biggest_tsn_newly_acked,
3340 						    tp1->rec.data.TSN_seq,
3341 						    tp1->sent,
3342 						    SCTP_FR_LOG_STRIKE_CHUNK);
3343 					}
3344 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3345 						tp1->sent++;
3346 					}
3347 					strike_flag = 1;
3348 					if ((asoc->sctp_cmt_on_off > 0) &&
3349 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3350 						/*
3351 						 * CMT DAC algorithm: If
3352 						 * SACK flag is set to 0,
3353 						 * then lowest_newack test
3354 						 * will not pass because it
3355 						 * would have been set to
3356 						 * the cumack earlier. If the
3357 						 * chunk is not already marked
3358 						 * for rtx, the SACK is not a
3359 						 * mixed SACK, and tp1 is not
3360 						 * between two sacked TSNs, mark by
3361 						 * one more. NOTE that we
3362 						 * are marking by one
3363 						 * additional time since the
3364 						 * SACK DAC flag indicates
3365 						 * that two packets have
3366 						 * been received after this
3367 						 * missing TSN.
3368 						 */
3369 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3370 						    (num_dests_sacked == 1) &&
3371 						    SCTP_TSN_GT(this_sack_lowest_newack,
3372 						    tp1->rec.data.TSN_seq)) {
3373 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3374 								sctp_log_fr(32 + num_dests_sacked,
3375 								    tp1->rec.data.TSN_seq,
3376 								    tp1->sent,
3377 								    SCTP_FR_LOG_STRIKE_CHUNK);
3378 							}
3379 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3380 								tp1->sent++;
3381 							}
3382 						}
3383 					}
3384 				}
3385 			}
3386 			/*
3387 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3388 			 * algo covers HTNA.
3389 			 */
3390 		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3391 		    biggest_tsn_newly_acked)) {
3392 			/*
3393 			 * We don't strike these: this is the HTNA
3394 			 * algorithm, i.e. we don't strike if our TSN is
3395 			 * larger than the Highest TSN Newly Acked.
3396 			 */
3397 			;
3398 		} else {
3399 			/* Strike the TSN */
3400 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3401 				sctp_log_fr(biggest_tsn_newly_acked,
3402 				    tp1->rec.data.TSN_seq,
3403 				    tp1->sent,
3404 				    SCTP_FR_LOG_STRIKE_CHUNK);
3405 			}
3406 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3407 				tp1->sent++;
3408 			}
3409 			if ((asoc->sctp_cmt_on_off > 0) &&
3410 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3411 				/*
3412 				 * CMT DAC algorithm: If SACK flag is set to
3413 				 * 0, then lowest_newack test will not pass
3414 				 * because it would have been set to the
3415 				 * cumack earlier. If the chunk is not already
3416 				 * marked for rtx, the SACK is not a mixed SACK,
3417 				 * and tp1 is not between two sacked TSNs, mark by
3418 				 * one more. NOTE that we are marking by one
3419 				 * additional time since the SACK DAC flag
3420 				 * indicates that two packets have been
3421 				 * received after this missing TSN.
3422 				 */
3423 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3424 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3425 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3426 						sctp_log_fr(48 + num_dests_sacked,
3427 						    tp1->rec.data.TSN_seq,
3428 						    tp1->sent,
3429 						    SCTP_FR_LOG_STRIKE_CHUNK);
3430 					}
3431 					tp1->sent++;
3432 				}
3433 			}
3434 		}
3435 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3436 			struct sctp_nets *alt;
3437 
3438 			/* fix counts and things */
3439 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3440 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3441 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3442 				    tp1->book_size,
3443 				    (uintptr_t) tp1->whoTo,
3444 				    tp1->rec.data.TSN_seq);
3445 			}
3446 			if (tp1->whoTo) {
3447 				tp1->whoTo->net_ack++;
3448 				sctp_flight_size_decrease(tp1);
3449 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3450 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3451 					    tp1);
3452 				}
3453 			}
3454 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3455 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3456 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3457 			}
3458 			/* add back to the rwnd */
3459 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3460 
3461 			/* remove from the total flight */
3462 			sctp_total_flight_decrease(stcb, tp1);
3463 
3464 			if ((stcb->asoc.peer_supports_prsctp) &&
3465 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3466 				/*
3467 				 * Has it been retransmitted tv_sec times? -
3468 				 * we store the retran count there.
3469 				 */
3470 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3471 					/* Yes, so drop it */
3472 					if (tp1->data != NULL) {
3473 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3474 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3475 						    SCTP_SO_NOT_LOCKED);
3476 					}
3477 					/* Make sure to flag we had a FR */
3478 					tp1->whoTo->net_ack++;
3479 					continue;
3480 				}
3481 			}
3482 			/* printf("OK, we are now ready to FR this guy\n"); */
3483 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3484 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3485 				    0, SCTP_FR_MARKED);
3486 			}
3487 			if (strike_flag) {
3488 				/* This is a subsequent FR */
3489 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3490 			}
3491 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3492 			if (asoc->sctp_cmt_on_off > 0) {
3493 				/*
3494 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3495 				 * If CMT is being used, then pick dest with
3496 				 * largest ssthresh for any retransmission.
3497 				 */
3498 				tp1->no_fr_allowed = 1;
3499 				alt = tp1->whoTo;
3500 				/* sa_ignore NO_NULL_CHK */
3501 				if (asoc->sctp_cmt_pf > 0) {
3502 					/*
3503 					 * JRS 5/18/07 - If CMT PF is on,
3504 					 * use the PF version of
3505 					 * find_alt_net()
3506 					 */
3507 					alt = sctp_find_alternate_net(stcb, alt, 2);
3508 				} else {
3509 					/*
3510 					 * JRS 5/18/07 - If only CMT is on,
3511 					 * use the CMT version of
3512 					 * find_alt_net()
3513 					 */
3514 					/* sa_ignore NO_NULL_CHK */
3515 					alt = sctp_find_alternate_net(stcb, alt, 1);
3516 				}
3517 				if (alt == NULL) {
3518 					alt = tp1->whoTo;
3519 				}
3520 				/*
3521 				 * CUCv2: If a different dest is picked for
3522 				 * the retransmission, then new
3523 				 * (rtx-)pseudo_cumack needs to be tracked
3524 				 * for orig dest. Let CUCv2 track new (rtx-)
3525 				 * pseudo-cumack always.
3526 				 */
3527 				if (tp1->whoTo) {
3528 					tp1->whoTo->find_pseudo_cumack = 1;
3529 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3530 				}
3531 			} else {/* CMT is OFF */
3532 
3533 #ifdef SCTP_FR_TO_ALTERNATE
3534 				/* Can we find an alternate? */
3535 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3536 #else
3537 				/*
3538 				 * default behavior is to NOT retransmit
3539 				 * FR's to an alternate. Armando Caro's
3540 				 * paper details why.
3541 				 */
3542 				alt = tp1->whoTo;
3543 #endif
3544 			}
3545 
3546 			tp1->rec.data.doing_fast_retransmit = 1;
3547 			tot_retrans++;
3548 			/* mark the sending seq for possible subsequent FR's */
3549 			/*
3550 			 * printf("Marking TSN for FR new value %x\n",
3551 			 * (uint32_t)tpi->rec.data.TSN_seq);
3552 			 */
3553 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3554 				/*
3555 				 * If the send queue is empty then sending_seq
3556 				 * is the next sequence number that will be
3557 				 * assigned, so we subtract one from this to
3558 				 * get the one we last sent.
3559 				 */
3560 				tp1->rec.data.fast_retran_tsn = sending_seq;
3561 			} else {
3562 				/*
3563 				 * If there are chunks on the send queue
3564 				 * (unsent data that has made it from the
3565 				 * stream queues but not out the door), we
3566 				 * take the first one (which will have the
3567 				 * lowest TSN) and subtract one to get the
3568 				 * one we last sent.
3569 				 */
3570 				struct sctp_tmit_chunk *ttt;
3571 
3572 				ttt = TAILQ_FIRST(&asoc->send_queue);
3573 				tp1->rec.data.fast_retran_tsn =
3574 				    ttt->rec.data.TSN_seq;
3575 			}
3576 
3577 			if (tp1->do_rtt) {
3578 				/*
3579 				 * this guy had a RTO calculation pending on
3580 				 * it, cancel it
3581 				 */
3582 				tp1->do_rtt = 0;
3583 			}
3584 			if (alt != tp1->whoTo) {
3585 				/* yes, there is an alternate. */
3586 				sctp_free_remote_addr(tp1->whoTo);
3587 				/* sa_ignore FREED_MEMORY */
3588 				tp1->whoTo = alt;
3589 				atomic_add_int(&alt->ref_count, 1);
3590 			}
3591 		}
3592 	}
3593 }
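
/*
 * Sketch of the marking scheme used above (illustrative summary): each
 * qualifying SACK "strikes" a missing chunk by incrementing tp1->sent;
 * once tp1->sent reaches SCTP_DATAGRAM_RESEND the chunk is queued for
 * fast retransmit, its bytes are removed from the flight, the peer's
 * rwnd credit is given back, and an alternate destination may be
 * picked depending on the CMT settings.
 */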
3594 
3595 struct sctp_tmit_chunk *
3596 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3597     struct sctp_association *asoc)
3598 {
3599 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3600 	struct timeval now;
3601 	int now_filled = 0;
3602 
3603 	if (asoc->peer_supports_prsctp == 0) {
3604 		return (NULL);
3605 	}
3606 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3607 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3608 		    tp1->sent != SCTP_DATAGRAM_RESEND) {
3609 			/* no chance to advance, out of here */
3610 			break;
3611 		}
3612 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3613 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3614 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3615 				    asoc->advanced_peer_ack_point,
3616 				    tp1->rec.data.TSN_seq, 0, 0);
3617 			}
3618 		}
3619 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3620 			/*
3621 			 * We can't fwd-tsn past any that are reliable, i.e.
3622 			 * chunks retransmitted until the asoc fails.
3623 			 */
3624 			break;
3625 		}
3626 		if (!now_filled) {
3627 			(void)SCTP_GETTIME_TIMEVAL(&now);
3628 			now_filled = 1;
3629 		}
3630 		/*
3631 		 * Now we have a chunk which is marked for another
3632 		 * retransmission to a PR-stream but may have already run
3633 		 * out of its chances, OR has been marked to skip now. Can
3634 		 * we skip it if it is a resend?
3635 		 */
3636 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3637 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3638 			/*
3639 			 * Now is this one marked for resend and its time is
3640 			 * now up?
3641 			 */
3642 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3643 				/* Yes so drop it */
3644 				if (tp1->data) {
3645 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3646 					    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3647 					    SCTP_SO_NOT_LOCKED);
3648 				}
3649 			} else {
3650 				/*
3651 				 * No, we are done when we hit one marked for
3652 				 * resend whose time has not expired.
3653 				 */
3654 				break;
3655 			}
3656 		}
3657 		/*
3658 		 * Ok now if this chunk is marked to drop it we can clean up
3659 		 * the chunk, advance our peer ack point and we can check
3660 		 * the next chunk.
3661 		 */
3662 		if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3663 			/* advance PeerAckPoint goes forward */
3664 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3665 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3666 				a_adv = tp1;
3667 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3668 				/* No update but we do save the chk */
3669 				a_adv = tp1;
3670 			}
3671 		} else {
3672 			/*
3673 			 * If it is still in RESEND we can advance no
3674 			 * further
3675 			 */
3676 			break;
3677 		}
3678 	}
3679 	return (a_adv);
3680 }
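
/*
 * Worked example (hypothetical TSNs): with the sent_queue holding TSN
 * 1001 (SCTP_FORWARD_TSN_SKIP), 1002 (SCTP_FORWARD_TSN_SKIP) and 1003
 * (SCTP_DATAGRAM_RESEND, not yet expired), the loop above advances
 * advanced_peer_ack_point to 1002 and stops at 1003, so a FWD-TSN
 * would ask the peer to treat everything up to 1002 as received.
 */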
3681 
3682 static int
3683 sctp_fs_audit(struct sctp_association *asoc)
3684 {
3685 	struct sctp_tmit_chunk *chk;
3686 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3687 	int entry_flight, entry_cnt, ret;
3688 
3689 	entry_flight = asoc->total_flight;
3690 	entry_cnt = asoc->total_flight_count;
3691 	ret = 0;
3692 
3693 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3694 		return (0);
3695 
3696 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3697 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3698 			SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3699 			    chk->rec.data.TSN_seq,
3700 			    chk->send_size,
3701 			    chk->snd_count
3702 			    );
3703 			inflight++;
3704 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3705 			resend++;
3706 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3707 			inbetween++;
3708 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3709 			above++;
3710 		} else {
3711 			acked++;
3712 		}
3713 	}
3714 
3715 	if ((inflight > 0) || (inbetween > 0)) {
3716 #ifdef INVARIANTS
3717 		panic("Flight size-express incorrect?\n");
3718 #else
3719 		SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3720 		    entry_flight, entry_cnt);
3721 
3722 		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3723 		    inflight, inbetween, resend, above, acked);
3724 		ret = 1;
3725 #endif
3726 	}
3727 	return (ret);
3728 }
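
/*
 * The audit above checks a flight-size invariant: at its call sites
 * nothing should be in flight, so any chunk still counted below
 * SCTP_DATAGRAM_RESEND ("inflight") or between RESEND and ACKED
 * ("inbetween") indicates the flight-size bookkeeping has drifted.
 */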
3729 
3730 
3731 static void
3732 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3733     struct sctp_association *asoc,
3734     struct sctp_nets *net,
3735     struct sctp_tmit_chunk *tp1)
3736 {
3737 	tp1->window_probe = 0;
3738 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3739 		/* TSN's skipped; we do NOT move back. */
3740 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3741 		    tp1->whoTo->flight_size,
3742 		    tp1->book_size,
3743 		    (uintptr_t) tp1->whoTo,
3744 		    tp1->rec.data.TSN_seq);
3745 		return;
3746 	}
3747 	/* First setup this by shrinking flight */
3748 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3749 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3750 		    tp1);
3751 	}
3752 	sctp_flight_size_decrease(tp1);
3753 	sctp_total_flight_decrease(stcb, tp1);
3754 	/* Now mark for resend */
3755 	tp1->sent = SCTP_DATAGRAM_RESEND;
3756 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3757 
3758 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3759 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3760 		    tp1->whoTo->flight_size,
3761 		    tp1->book_size,
3762 		    (uintptr_t) tp1->whoTo,
3763 		    tp1->rec.data.TSN_seq);
3764 	}
3765 }
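
/*
 * In other words: a chunk that was sent purely as a window probe is
 * pulled out of the flight and re-marked SCTP_DATAGRAM_RESEND once the
 * peer's window re-opens, so it goes out again as ordinary data
 * instead of staying charged against the (formerly zero) window.
 */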
3766 
3767 void
3768 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3769     uint32_t rwnd, int *abort_now, int ecne_seen)
3770 {
3771 	struct sctp_nets *net;
3772 	struct sctp_association *asoc;
3773 	struct sctp_tmit_chunk *tp1, *tp2;
3774 	uint32_t old_rwnd;
3775 	int win_probe_recovery = 0;
3776 	int win_probe_recovered = 0;
3777 	int j, done_once = 0;
3778 
3779 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3780 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3781 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3782 	}
3783 	SCTP_TCB_LOCK_ASSERT(stcb);
3784 #ifdef SCTP_ASOCLOG_OF_TSNS
3785 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3786 	stcb->asoc.cumack_log_at++;
3787 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3788 		stcb->asoc.cumack_log_at = 0;
3789 	}
3790 #endif
3791 	asoc = &stcb->asoc;
3792 	old_rwnd = asoc->peers_rwnd;
3793 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3794 		/* old ack */
3795 		return;
3796 	} else if (asoc->last_acked_seq == cumack) {
3797 		/* Window update sack */
3798 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3799 		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3800 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3801 			/* SWS sender side engages */
3802 			asoc->peers_rwnd = 0;
3803 		}
3804 		if (asoc->peers_rwnd > old_rwnd) {
3805 			goto again;
3806 		}
3807 		return;
3808 	}
3809 	/* First setup for CC stuff */
3810 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3811 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3812 			/* Drag along the window_tsn for cwr's */
3813 			net->cwr_window_tsn = cumack;
3814 		}
3815 		net->prev_cwnd = net->cwnd;
3816 		net->net_ack = 0;
3817 		net->net_ack2 = 0;
3818 
3819 		/*
3820 		 * CMT: Reset CUC and Fast recovery algo variables before
3821 		 * SACK processing
3822 		 */
3823 		net->new_pseudo_cumack = 0;
3824 		net->will_exit_fast_recovery = 0;
3825 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3826 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3827 		}
3828 	}
3829 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3830 		uint32_t send_s;
3831 
3832 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3833 			tp1 = TAILQ_LAST(&asoc->sent_queue,
3834 			    sctpchunk_listhead);
3835 			send_s = tp1->rec.data.TSN_seq + 1;
3836 		} else {
3837 			send_s = asoc->sending_seq;
3838 		}
3839 		if (SCTP_TSN_GE(cumack, send_s)) {
3840 #ifndef INVARIANTS
3841 			struct mbuf *oper;
3842 
3843 #endif
3844 #ifdef INVARIANTS
3845 			panic("Impossible sack 1");
3846 #else
3847 
3848 			*abort_now = 1;
3849 			/* XXX */
3850 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3851 			    0, M_DONTWAIT, 1, MT_DATA);
3852 			if (oper) {
3853 				struct sctp_paramhdr *ph;
3854 				uint32_t *ippp;
3855 
3856 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3857 				    sizeof(uint32_t);
3858 				ph = mtod(oper, struct sctp_paramhdr *);
3859 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3860 				ph->param_length = htons(SCTP_BUF_LEN(oper));
3861 				ippp = (uint32_t *) (ph + 1);
3862 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3863 			}
3864 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3865 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
3866 			return;
3867 #endif
3868 		}
3869 	}
3870 	asoc->this_sack_highest_gap = cumack;
3871 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3872 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3873 		    stcb->asoc.overall_error_count,
3874 		    0,
3875 		    SCTP_FROM_SCTP_INDATA,
3876 		    __LINE__);
3877 	}
3878 	stcb->asoc.overall_error_count = 0;
3879 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3880 		/* process the new consecutive TSN first */
3881 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3882 			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3883 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3884 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3885 				}
3886 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3887 					/*
3888 					 * If it is less than ACKED, it is
3889 					 * now no-longer in flight. Higher
3890 					 * values may occur during marking
3891 					 */
3892 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3893 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3894 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3895 							    tp1->whoTo->flight_size,
3896 							    tp1->book_size,
3897 							    (uintptr_t) tp1->whoTo,
3898 							    tp1->rec.data.TSN_seq);
3899 						}
3900 						sctp_flight_size_decrease(tp1);
3901 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3902 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3903 							    tp1);
3904 						}
3905 						/* sa_ignore NO_NULL_CHK */
3906 						sctp_total_flight_decrease(stcb, tp1);
3907 					}
3908 					tp1->whoTo->net_ack += tp1->send_size;
3909 					if (tp1->snd_count < 2) {
3910 						/*
3911 						 * True non-retransmitted
3912 						 * chunk
3913 						 */
3914 						tp1->whoTo->net_ack2 +=
3915 						    tp1->send_size;
3916 
3917 						/* update RTO too? */
3918 						if (tp1->do_rtt) {
3919 							tp1->whoTo->RTO =
3920 							/*
3921 							 * sa_ignore
3922 							 * NO_NULL_CHK
3923 							 */
3924 							    sctp_calculate_rto(stcb,
3925 							    asoc, tp1->whoTo,
3926 							    &tp1->sent_rcv_time,
3927 							    sctp_align_safe_nocopy,
3928 							    SCTP_DETERMINE_LL_OK);
3929 							tp1->do_rtt = 0;
3930 						}
3931 					}
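					/*
					 * Karn's rule in effect: e.g. a
					 * chunk with snd_count == 2 (sent
					 * twice) would yield an ambiguous
					 * RTT sample, so only first
					 * transmissions feed the RTO
					 * update above.
					 */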
3932 					/*
3933 					 * CMT: CUCv2 algorithm. From the
3934 					 * cumack'd TSNs, for each TSN being
3935 					 * acked for the first time, set the
3936 					 * following variables for the
3937 					 * corresp destination.
3938 					 * new_pseudo_cumack will trigger a
3939 					 * cwnd update.
3940 					 * find_(rtx_)pseudo_cumack will
3941 					 * trigger search for the next
3942 					 * expected (rtx-)pseudo-cumack.
3943 					 */
3944 					tp1->whoTo->new_pseudo_cumack = 1;
3945 					tp1->whoTo->find_pseudo_cumack = 1;
3946 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3947 
3948 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3949 						/* sa_ignore NO_NULL_CHK */
3950 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3951 					}
3952 				}
3953 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3954 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3955 				}
3956 				if (tp1->rec.data.chunk_was_revoked) {
3957 					/* deflate the cwnd */
3958 					tp1->whoTo->cwnd -= tp1->book_size;
3959 					tp1->rec.data.chunk_was_revoked = 0;
3960 				}
3961 				tp1->sent = SCTP_DATAGRAM_ACKED;
3962 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3963 				if (tp1->data) {
3964 					/* sa_ignore NO_NULL_CHK */
3965 					sctp_free_bufspace(stcb, asoc, tp1, 1);
3966 					sctp_m_freem(tp1->data);
3967 					tp1->data = NULL;
3968 				}
3969 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3970 					sctp_log_sack(asoc->last_acked_seq,
3971 					    cumack,
3972 					    tp1->rec.data.TSN_seq,
3973 					    0,
3974 					    0,
3975 					    SCTP_LOG_FREE_SENT);
3976 				}
3977 				asoc->sent_queue_cnt--;
3978 				sctp_free_a_chunk(stcb, tp1);
3979 			} else {
3980 				break;
3981 			}
3982 		}
3983 
3984 	}
3985 	/* sa_ignore NO_NULL_CHK */
3986 	if (stcb->sctp_socket) {
3987 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3988 		struct socket *so;
3989 
3990 #endif
3991 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3992 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3993 			/* sa_ignore NO_NULL_CHK */
3994 			sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
3995 		}
3996 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3997 		so = SCTP_INP_SO(stcb->sctp_ep);
3998 		atomic_add_int(&stcb->asoc.refcnt, 1);
3999 		SCTP_TCB_UNLOCK(stcb);
4000 		SCTP_SOCKET_LOCK(so, 1);
4001 		SCTP_TCB_LOCK(stcb);
4002 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4003 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4004 			/* assoc was freed while we were unlocked */
4005 			SCTP_SOCKET_UNLOCK(so, 1);
4006 			return;
4007 		}
4008 #endif
4009 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4010 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4011 		SCTP_SOCKET_UNLOCK(so, 1);
4012 #endif
4013 	} else {
4014 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4015 			sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
4016 		}
4017 	}
4018 
4019 	/* JRS - Use the congestion control given in the CC module */
4020 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0))
4021 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4022 
4023 	asoc->last_acked_seq = cumack;
4024 
4025 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4026 		/* nothing left in-flight */
4027 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4028 			net->flight_size = 0;
4029 			net->partial_bytes_acked = 0;
4030 		}
4031 		asoc->total_flight = 0;
4032 		asoc->total_flight_count = 0;
4033 	}
4034 	/* RWND update */
4035 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4036 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4037 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4038 		/* SWS sender side engages */
4039 		asoc->peers_rwnd = 0;
4040 	}
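	/*
	 * Illustrative arithmetic (hypothetical numbers): with an
	 * advertised rwnd of 10000 bytes, total_flight = 6000,
	 * total_flight_count = 4 and sctp_peer_chunk_oh = 256, the update
	 * above yields peers_rwnd = 10000 - (6000 + 4 * 256) = 2976; had
	 * that fallen below sctp_sws_sender it would have been clamped to
	 * 0 to avoid silly-window-syndrome sends.
	 */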
4041 	if (asoc->peers_rwnd > old_rwnd) {
4042 		win_probe_recovery = 1;
4043 	}
4044 	/* Now assure a timer where data is queued at */
4045 again:
4046 	j = 0;
4047 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4048 		int to_ticks;
4049 
4050 		if (win_probe_recovery && (net->window_probe)) {
4051 			win_probe_recovered = 1;
4052 			/*
4053 			 * Find the first chunk that was used for a window
4054 			 * probe and move it back to be resent.
4055 			 */
4056 			/* sa_ignore FREED_MEMORY */
4057 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4058 				if (tp1->window_probe) {
4059 					/* move back to data send queue */
4060 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
4061 					break;
4062 				}
4063 			}
4064 		}
4065 		if (net->RTO == 0) {
4066 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4067 		} else {
4068 			to_ticks = MSEC_TO_TICKS(net->RTO);
4069 		}
4070 		if (net->flight_size) {
4071 			j++;
4072 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4073 			    sctp_timeout_handler, &net->rxt_timer);
4074 			if (net->window_probe) {
4075 				net->window_probe = 0;
4076 			}
4077 		} else {
4078 			if (net->window_probe) {
4079 				/*
4080 				 * In window probes we must assure a timer
4081 				 * is still running there
4082 				 */
4083 				net->window_probe = 0;
4084 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4085 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4086 					    sctp_timeout_handler, &net->rxt_timer);
4087 				}
4088 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4089 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4090 				    stcb, net,
4091 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4092 			}
4093 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4094 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4095 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4096 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4097 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4098 				}
4099 			}
4100 		}
4101 	}
4102 	if ((j == 0) &&
4103 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4104 	    (asoc->sent_queue_retran_cnt == 0) &&
4105 	    (win_probe_recovered == 0) &&
4106 	    (done_once == 0)) {
4107 		/*
4108 		 * huh, this should not happen unless all packets are
4109 		 * PR-SCTP and marked to skip, of course.
4110 		 */
4111 		if (sctp_fs_audit(asoc)) {
4112 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4113 				net->flight_size = 0;
4114 			}
4115 			asoc->total_flight = 0;
4116 			asoc->total_flight_count = 0;
4117 			asoc->sent_queue_retran_cnt = 0;
4118 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4119 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4120 					sctp_flight_size_increase(tp1);
4121 					sctp_total_flight_increase(stcb, tp1);
4122 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4123 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4124 				}
4125 			}
4126 		}
4127 		done_once = 1;
4128 		goto again;
4129 	}
4130 	/**********************************/
4131 	/* Now what about shutdown issues */
4132 	/**********************************/
4133 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4134 		/* nothing left on sendqueue.. consider done */
4135 		/* clean up */
4136 		if ((asoc->stream_queue_cnt == 1) &&
4137 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4138 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4139 		    (asoc->locked_on_sending)
4140 		    ) {
4141 			struct sctp_stream_queue_pending *sp;
4142 
4143 			/*
4144 			 * We may be in a state where we got all data across
4145 			 * but cannot write more due to a shutdown... we abort
4146 			 * since the user did not indicate EOR in this case.
4147 			 * The sp will be cleaned during free of the asoc.
4148 			 */
4149 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4150 			    sctp_streamhead);
4151 			if ((sp) && (sp->length == 0)) {
4152 				/* Let cleanup code purge it */
4153 				if (sp->msg_is_complete) {
4154 					asoc->stream_queue_cnt--;
4155 				} else {
4156 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4157 					asoc->locked_on_sending = NULL;
4158 					asoc->stream_queue_cnt--;
4159 				}
4160 			}
4161 		}
4162 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4163 		    (asoc->stream_queue_cnt == 0)) {
4164 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4165 				/* Need to abort here */
4166 				struct mbuf *oper;
4167 
4168 		abort_out_now:
4169 				*abort_now = 1;
4170 				/* XXX */
4171 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4172 				    0, M_DONTWAIT, 1, MT_DATA);
4173 				if (oper) {
4174 					struct sctp_paramhdr *ph;
4175 					uint32_t *ippp;
4176 
4177 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4178 					    sizeof(uint32_t);
4179 					ph = mtod(oper, struct sctp_paramhdr *);
4180 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4181 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4182 					ippp = (uint32_t *) (ph + 1);
4183 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4184 				}
4185 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4186 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4187 			} else {
4188 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4189 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4190 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4191 				}
4192 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4193 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4194 				sctp_stop_timers_for_shutdown(stcb);
4195 				sctp_send_shutdown(stcb,
4196 				    stcb->asoc.primary_destination);
4197 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4198 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4199 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4200 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4201 			}
4202 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4203 		    (asoc->stream_queue_cnt == 0)) {
4204 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4205 				goto abort_out_now;
4206 			}
4207 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4208 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4209 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4210 			sctp_send_shutdown_ack(stcb,
4211 			    stcb->asoc.primary_destination);
4212 			sctp_stop_timers_for_shutdown(stcb);
4213 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4214 			    stcb->sctp_ep, stcb, asoc->primary_destination);
4215 		}
4216 	}
4217 	/*********************************************/
4218 	/* Here we perform PR-SCTP procedures        */
4219 	/* (section 4.2)                             */
4220 	/*********************************************/
4221 	/* C1. update advancedPeerAckPoint */
4222 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4223 		asoc->advanced_peer_ack_point = cumack;
4224 	}
4225 	/* PR-Sctp issues need to be addressed too */
4226 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4227 		struct sctp_tmit_chunk *lchk;
4228 		uint32_t old_adv_peer_ack_point;
4229 
4230 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4231 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4232 		/* C3. See if we need to send a Fwd-TSN */
4233 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4234 			/*
4235 			 * ISSUE with ECN, see FWD-TSN processing.
4236 			 */
4237 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4238 				send_forward_tsn(stcb, asoc);
4239 			} else if (lchk) {
4240 				/* try to FR fwd-tsn's that get lost too */
4241 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4242 					send_forward_tsn(stcb, asoc);
4243 				}
4244 			}
4245 		}
4246 		if (lchk) {
4247 			/* Assure a timer is up */
4248 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4249 			    stcb->sctp_ep, stcb, lchk->whoTo);
4250 		}
4251 	}
4252 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4253 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4254 		    rwnd,
4255 		    stcb->asoc.peers_rwnd,
4256 		    stcb->asoc.total_flight,
4257 		    stcb->asoc.total_output_queue_size);
4258 	}
4259 }
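
/*
 * Note: sctp_express_handle_sack() above is the fast path for a SACK
 * carrying no gap-ack or duplicate-TSN reports; it only slides the
 * cumulative ack forward. SACKs with gap reports take the full
 * sctp_handle_sack() path below, which additionally runs the strike,
 * revocation and PR-SCTP machinery.
 */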
4260 
4261 void
4262 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4263     struct sctp_tcb *stcb, struct sctp_nets *net_from,
4264     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4265     int *abort_now, uint8_t flags,
4266     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4267 {
4268 	struct sctp_association *asoc;
4269 	struct sctp_tmit_chunk *tp1, *tp2;
4270 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4271 	uint32_t sav_cum_ack;
4272 	uint16_t wake_him = 0;
4273 	uint32_t send_s = 0;
4274 	long j;
4275 	int accum_moved = 0;
4276 	int will_exit_fast_recovery = 0;
4277 	uint32_t a_rwnd, old_rwnd;
4278 	int win_probe_recovery = 0;
4279 	int win_probe_recovered = 0;
4280 	struct sctp_nets *net = NULL;
4281 	int ecn_seg_sums = 0;
4282 	int done_once;
4283 	uint8_t reneged_all = 0;
4284 	uint8_t cmt_dac_flag;
4285 
4286 	/*
4287 	 * we take any chance we can to service our queues since we cannot
4288 	 * get awoken when the socket is read from :<
4289 	 */
4290 	/*
4291 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4292 	 * old sack, if so discard. 2) If there is nothing left in the send
4293 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4294 	 * too, update any rwnd change and verify no timers are running.
4295 	 * then return. 3) Process any new consecutive data i.e. cum-ack
4296 	 * moved process these first and note that it moved. 4) Process any
4297 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4298 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4299 	 * sync up flightsizes and things, stop all timers and also check
4300 	 * for shutdown_pending state. If so then go ahead and send off the
4301 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4302 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4303 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4304 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4305 	 * if in shutdown_recv state.
4306 	 */
4307 	SCTP_TCB_LOCK_ASSERT(stcb);
4308 	/* CMT DAC algo */
4309 	this_sack_lowest_newack = 0;
4310 	j = 0;
4311 	SCTP_STAT_INCR(sctps_slowpath_sack);
4312 	last_tsn = cum_ack;
4313 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4314 #ifdef SCTP_ASOCLOG_OF_TSNS
4315 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4316 	stcb->asoc.cumack_log_at++;
4317 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4318 		stcb->asoc.cumack_log_at = 0;
4319 	}
4320 #endif
4321 	a_rwnd = rwnd;
4322 
4323 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4324 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4325 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4326 	}
4327 	old_rwnd = stcb->asoc.peers_rwnd;
4328 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4329 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4330 		    stcb->asoc.overall_error_count,
4331 		    0,
4332 		    SCTP_FROM_SCTP_INDATA,
4333 		    __LINE__);
4334 	}
4335 	stcb->asoc.overall_error_count = 0;
4336 	asoc = &stcb->asoc;
4337 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4338 		sctp_log_sack(asoc->last_acked_seq,
4339 		    cum_ack,
4340 		    0,
4341 		    num_seg,
4342 		    num_dup,
4343 		    SCTP_LOG_NEW_SACK);
4344 	}
4345 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
4346 		uint16_t i;
4347 		uint32_t *dupdata, dblock;
4348 
4349 		for (i = 0; i < num_dup; i++) {
4350 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4351 			    sizeof(uint32_t), (uint8_t *) & dblock);
4352 			if (dupdata == NULL) {
4353 				break;
4354 			}
4355 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4356 		}
4357 	}
4358 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4359 		/* reality check */
4360 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4361 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4362 			    sctpchunk_listhead);
4363 			send_s = tp1->rec.data.TSN_seq + 1;
4364 		} else {
4365 			tp1 = NULL;
4366 			send_s = asoc->sending_seq;
4367 		}
4368 		if (SCTP_TSN_GE(cum_ack, send_s)) {
4369 			struct mbuf *oper;
4370 
4371 			/*
4372 			 * no way, we have not even sent this TSN out yet.
4373 			 * Peer is hopelessly messed up with us.
4374 			 */
4375 			SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4376 			    cum_ack, send_s);
4377 			if (tp1) {
4378 				SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4379 				    tp1->rec.data.TSN_seq, tp1);
4380 			}
4381 	hopeless_peer:
4382 			*abort_now = 1;
4383 			/* XXX */
4384 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4385 			    0, M_DONTWAIT, 1, MT_DATA);
4386 			if (oper) {
4387 				struct sctp_paramhdr *ph;
4388 				uint32_t *ippp;
4389 
4390 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4391 				    sizeof(uint32_t);
4392 				ph = mtod(oper, struct sctp_paramhdr *);
4393 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4394 				ph->param_length = htons(SCTP_BUF_LEN(oper));
4395 				ippp = (uint32_t *) (ph + 1);
4396 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4397 			}
4398 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4399 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4400 			return;
4401 		}
4402 	}
4403 	/**********************/
4404 	/* 1) check the range */
4405 	/**********************/
4406 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4407 		/* acking something behind */
4408 		return;
4409 	}
4410 	sav_cum_ack = asoc->last_acked_seq;
4411 
4412 	/* update the Rwnd of the peer */
4413 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4414 	    TAILQ_EMPTY(&asoc->send_queue) &&
4415 	    (asoc->stream_queue_cnt == 0)) {
4416 		/* nothing left on send/sent and strmq */
4417 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4418 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4419 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4420 		}
4421 		asoc->peers_rwnd = a_rwnd;
4422 		if (asoc->sent_queue_retran_cnt) {
4423 			asoc->sent_queue_retran_cnt = 0;
4424 		}
4425 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4426 			/* SWS sender side engages */
4427 			asoc->peers_rwnd = 0;
4428 		}
4429 		/* stop any timers */
4430 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4431 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4432 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4433 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4434 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4435 					SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
4436 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4437 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4438 				}
4439 			}
4440 			net->partial_bytes_acked = 0;
4441 			net->flight_size = 0;
4442 		}
4443 		asoc->total_flight = 0;
4444 		asoc->total_flight_count = 0;
4445 		return;
4446 	}
4447 	/*
4448 	 * We init net_ack and net_ack2 to 0. These are used to track two
4449 	 * things. The total byte count acked is tracked in net_ack AND
4450 	 * net_ack2 is used to track the total bytes acked that are
4451 	 * unambiguous and were never retransmitted. We track these on a per
4452 	 * destination address basis.
4453 	 */
4454 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4455 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4456 			/* Drag along the window_tsn for cwr's */
4457 			net->cwr_window_tsn = cum_ack;
4458 		}
4459 		net->prev_cwnd = net->cwnd;
4460 		net->net_ack = 0;
4461 		net->net_ack2 = 0;
4462 
4463 		/*
4464 		 * CMT: Reset CUC and Fast recovery algo variables before
4465 		 * SACK processing
4466 		 */
4467 		net->new_pseudo_cumack = 0;
4468 		net->will_exit_fast_recovery = 0;
4469 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4470 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4471 		}
4472 	}
4473 	/* process the new consecutive TSN first */
4474 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4475 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4476 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4477 				accum_moved = 1;
4478 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4479 					/*
4480 					 * If it is less than ACKED, it is
4481 					 * now no-longer in flight. Higher
4482 					 * values may occur during marking
4483 					 */
4484 					if ((tp1->whoTo->dest_state &
4485 					    SCTP_ADDR_UNCONFIRMED) &&
4486 					    (tp1->snd_count < 2)) {
4487 						/*
4488 						 * If there was no retran
4489 						 * and the address is
4490 						 * un-confirmed and we sent
4491 						 * there and are now
4492 						 * sacked, it is confirmed;
4493 						 * mark it so.
4494 						 */
4495 						tp1->whoTo->dest_state &=
4496 						    ~SCTP_ADDR_UNCONFIRMED;
4497 					}
4498 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4499 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4500 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4501 							    tp1->whoTo->flight_size,
4502 							    tp1->book_size,
4503 							    (uintptr_t) tp1->whoTo,
4504 							    tp1->rec.data.TSN_seq);
4505 						}
4506 						sctp_flight_size_decrease(tp1);
4507 						sctp_total_flight_decrease(stcb, tp1);
4508 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4509 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4510 							    tp1);
4511 						}
4512 					}
4513 					tp1->whoTo->net_ack += tp1->send_size;
4514 
4515 					/* CMT SFR and DAC algos */
4516 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4517 					tp1->whoTo->saw_newack = 1;
4518 
4519 					if (tp1->snd_count < 2) {
4520 						/*
4521 						 * True non-retransmitted
4522 						 * chunk
4523 						 */
4524 						tp1->whoTo->net_ack2 +=
4525 						    tp1->send_size;
4526 
4527 						/* update RTO too? */
4528 						if (tp1->do_rtt) {
4529 							tp1->whoTo->RTO =
4530 							    sctp_calculate_rto(stcb,
4531 							    asoc, tp1->whoTo,
4532 							    &tp1->sent_rcv_time,
4533 							    sctp_align_safe_nocopy,
4534 							    SCTP_DETERMINE_LL_OK);
4535 							tp1->do_rtt = 0;
4536 						}
4537 					}
4538 					/*
4539 					 * CMT: CUCv2 algorithm. From the
4540 					 * cumack'd TSNs, for each TSN being
4541 					 * acked for the first time, set the
4542 					 * following variables for the
4543 					 * corresp destination.
4544 					 * new_pseudo_cumack will trigger a
4545 					 * cwnd update.
4546 					 * find_(rtx_)pseudo_cumack will
4547 					 * trigger search for the next
4548 					 * expected (rtx-)pseudo-cumack.
4549 					 */
4550 					tp1->whoTo->new_pseudo_cumack = 1;
4551 					tp1->whoTo->find_pseudo_cumack = 1;
4552 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4553 
4554 
4555 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4556 						sctp_log_sack(asoc->last_acked_seq,
4557 						    cum_ack,
4558 						    tp1->rec.data.TSN_seq,
4559 						    0,
4560 						    0,
4561 						    SCTP_LOG_TSN_ACKED);
4562 					}
4563 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4564 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4565 					}
4566 				}
4567 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4568 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4569 #ifdef SCTP_AUDITING_ENABLED
4570 					sctp_audit_log(0xB3,
4571 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4572 #endif
4573 				}
4574 				if (tp1->rec.data.chunk_was_revoked) {
4575 					/* deflate the cwnd */
4576 					tp1->whoTo->cwnd -= tp1->book_size;
4577 					tp1->rec.data.chunk_was_revoked = 0;
4578 				}
4579 				tp1->sent = SCTP_DATAGRAM_ACKED;
4580 			}
4581 		} else {
4582 			break;
4583 		}
4584 	}
4585 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4586 	/* always set this up to cum-ack */
4587 	asoc->this_sack_highest_gap = last_tsn;
4588 
4589 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4590 
4591 		/*
4592 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4593 		 * to be greater than the cumack. Also reset saw_newack to 0
4594 		 * for all dests.
4595 		 */
4596 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4597 			net->saw_newack = 0;
4598 			net->this_sack_highest_newack = last_tsn;
4599 		}
4600 
4601 		/*
4602 		 * thisSackHighestGap will increase while handling NEW
4603 		 * segments; this_sack_highest_newack will increase while
4604 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4605 		 * used for CMT DAC algo. saw_newack will also change.
4606 		 */
4607 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4608 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4609 		    num_seg, num_nr_seg, &ecn_seg_sums)) {
4610 			wake_him++;
4611 		}
4612 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4613 			/*
4614 			 * validate the biggest_tsn_acked in the gap acks if
4615 			 * strict adherence is wanted.
4616 			 */
4617 			if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4618 				/*
4619 				 * peer is either confused or we are under
4620 				 * attack. We must abort.
4621 				 */
4622 				SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4623 				    biggest_tsn_acked,
4624 				    send_s);
4625 
4626 				goto hopeless_peer;
4627 			}
4628 		}
4629 	}
4630 	/*******************************************/
4631 	/* cancel ALL T3-send timer if accum moved */
4632 	/*******************************************/
4633 	if (asoc->sctp_cmt_on_off > 0) {
4634 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4635 			if (net->new_pseudo_cumack)
4636 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4637 				    stcb, net,
4638 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4639 
4640 		}
4641 	} else {
4642 		if (accum_moved) {
4643 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4644 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4645 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4646 			}
4647 		}
4648 	}
4649 	/********************************************/
4650 	/* drop the acked chunks from the sentqueue */
4651 	/********************************************/
4652 	asoc->last_acked_seq = cum_ack;
4653 
4654 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4655 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4656 			break;
4657 		}
4658 		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4659 			/* no more sent on list */
4660 			SCTP_PRINTF("Warning, tp1->sent == %d and it is now acked?\n",
4661 			    tp1->sent);
4662 		}
4663 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4664 		if (tp1->pr_sctp_on) {
4665 			if (asoc->pr_sctp_cnt != 0)
4666 				asoc->pr_sctp_cnt--;
4667 		}
4668 		asoc->sent_queue_cnt--;
4669 		if (tp1->data) {
4670 			/* sa_ignore NO_NULL_CHK */
4671 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4672 			sctp_m_freem(tp1->data);
4673 			tp1->data = NULL;
4674 			if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4675 				asoc->sent_queue_cnt_removeable--;
4676 			}
4677 		}
4678 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4679 			sctp_log_sack(asoc->last_acked_seq,
4680 			    cum_ack,
4681 			    tp1->rec.data.TSN_seq,
4682 			    0,
4683 			    0,
4684 			    SCTP_LOG_FREE_SENT);
4685 		}
4686 		sctp_free_a_chunk(stcb, tp1);
4687 		wake_him++;
4688 	}
4689 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4690 #ifdef INVARIANTS
4691 		panic("Warning flight size is positive and should be 0");
4692 #else
4693 		SCTP_PRINTF("Warning: flight size is %d but should be 0\n",
4694 		    asoc->total_flight);
4695 #endif
4696 		asoc->total_flight = 0;
4697 	}
4698 	/* sa_ignore NO_NULL_CHK */
4699 	if ((wake_him) && (stcb->sctp_socket)) {
4700 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4701 		struct socket *so;
4702 
4703 #endif
4704 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4705 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4706 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
4707 		}
4708 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4709 		so = SCTP_INP_SO(stcb->sctp_ep);
4710 		atomic_add_int(&stcb->asoc.refcnt, 1);
4711 		SCTP_TCB_UNLOCK(stcb);
4712 		SCTP_SOCKET_LOCK(so, 1);
4713 		SCTP_TCB_LOCK(stcb);
4714 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4715 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4716 			/* assoc was freed while we were unlocked */
4717 			SCTP_SOCKET_UNLOCK(so, 1);
4718 			return;
4719 		}
4720 #endif
4721 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4722 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4723 		SCTP_SOCKET_UNLOCK(so, 1);
4724 #endif
4725 	} else {
4726 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4727 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
4728 		}
4729 	}
4730 
4731 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4732 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4733 			/* Setup so we will exit RFC2582 fast recovery */
4734 			will_exit_fast_recovery = 1;
4735 		}
4736 	}
4737 	/*
4738 	 * Check for revoked fragments:
4739 	 *
4740 	 * If the previous SACK had no frags, we can't have any revoked. If
4741 	 * the previous SACK had frags: when we now have frags (num_seg > 0),
4742 	 * call sctp_check_for_revoked() to tell if the peer revoked some of
4743 	 * them; else the peer revoked all ACKED fragments, since we had
4744 	 * some before and now we have NONE.
4745 	 */
4746 
4747 	if (num_seg) {
4748 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4749 		asoc->saw_sack_with_frags = 1;
4750 	} else if (asoc->saw_sack_with_frags) {
4751 		int cnt_revoked = 0;
4752 
		/* Peer revoked all datagrams that were marked or acked */
4754 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4755 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4756 				tp1->sent = SCTP_DATAGRAM_SENT;
4757 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4758 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4759 					    tp1->whoTo->flight_size,
4760 					    tp1->book_size,
4761 					    (uintptr_t) tp1->whoTo,
4762 					    tp1->rec.data.TSN_seq);
4763 				}
4764 				sctp_flight_size_increase(tp1);
4765 				sctp_total_flight_increase(stcb, tp1);
4766 				tp1->rec.data.chunk_was_revoked = 1;
4767 				/*
4768 				 * To ensure that this increase in
4769 				 * flightsize, which is artificial, does not
4770 				 * throttle the sender, we also increase the
4771 				 * cwnd artificially.
4772 				 */
4773 				tp1->whoTo->cwnd += tp1->book_size;
4774 				cnt_revoked++;
4775 			}
4776 		}
4777 		if (cnt_revoked) {
4778 			reneged_all = 1;
4779 		}
4780 		asoc->saw_sack_with_frags = 0;
4781 	}
4782 	if (num_nr_seg > 0)
4783 		asoc->saw_sack_with_nr_frags = 1;
4784 	else
4785 		asoc->saw_sack_with_nr_frags = 0;
4786 
4787 	/* JRS - Use the congestion control given in the CC module */
4788 	if (ecne_seen == 0)
4789 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4790 
4791 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4792 		/* nothing left in-flight */
4793 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4794 			/* stop all timers */
4795 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4796 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4797 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4798 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4799 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4800 				}
4801 			}
4802 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4803 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4804 			net->flight_size = 0;
4805 			net->partial_bytes_acked = 0;
4806 		}
4807 		asoc->total_flight = 0;
4808 		asoc->total_flight_count = 0;
4809 	}
4810 	/**********************************/
4811 	/* Now what about shutdown issues */
4812 	/**********************************/
4813 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4814 		/* nothing left on sendqueue.. consider done */
4815 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4816 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4817 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4818 		}
4819 		asoc->peers_rwnd = a_rwnd;
4820 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4821 			/* SWS sender side engages */
4822 			asoc->peers_rwnd = 0;
4823 		}
4824 		/* clean up */
4825 		if ((asoc->stream_queue_cnt == 1) &&
4826 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4827 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4828 		    (asoc->locked_on_sending)
4829 		    ) {
4830 			struct sctp_stream_queue_pending *sp;
4831 
			/*
			 * We may be in a state where everything got across
			 * but we cannot write more due to a shutdown in
			 * progress.  We abort, since the user did not
			 * indicate EOR in this case.
			 */
4837 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4838 			    sctp_streamhead);
4839 			if ((sp) && (sp->length == 0)) {
4840 				asoc->locked_on_sending = NULL;
4841 				if (sp->msg_is_complete) {
4842 					asoc->stream_queue_cnt--;
4843 				} else {
4844 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4845 					asoc->stream_queue_cnt--;
4846 				}
4847 			}
4848 		}
4849 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4850 		    (asoc->stream_queue_cnt == 0)) {
4851 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4852 				/* Need to abort here */
4853 				struct mbuf *oper;
4854 
4855 		abort_out_now:
4856 				*abort_now = 1;
4857 				/* XXX */
4858 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4859 				    0, M_DONTWAIT, 1, MT_DATA);
4860 				if (oper) {
4861 					struct sctp_paramhdr *ph;
4862 					uint32_t *ippp;
4863 
4864 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4865 					    sizeof(uint32_t);
4866 					ph = mtod(oper, struct sctp_paramhdr *);
4867 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4868 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4869 					ippp = (uint32_t *) (ph + 1);
4870 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4871 				}
4872 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4873 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4874 				return;
4875 			} else {
4876 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4877 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4878 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4879 				}
4880 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4881 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4882 				sctp_stop_timers_for_shutdown(stcb);
4883 				sctp_send_shutdown(stcb,
4884 				    stcb->asoc.primary_destination);
4885 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4886 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4887 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4888 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4889 			}
4890 			return;
4891 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4892 		    (asoc->stream_queue_cnt == 0)) {
4893 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4894 				goto abort_out_now;
4895 			}
4896 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4897 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4898 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4899 			sctp_send_shutdown_ack(stcb,
4900 			    stcb->asoc.primary_destination);
4901 			sctp_stop_timers_for_shutdown(stcb);
4902 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4903 			    stcb->sctp_ep, stcb, asoc->primary_destination);
4904 			return;
4905 		}
4906 	}
4907 	/*
4908 	 * Now here we are going to recycle net_ack for a different use...
4909 	 * HEADS UP.
4910 	 */
4911 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4912 		net->net_ack = 0;
4913 	}
4914 
4915 	/*
4916 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4917 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4918 	 * automatically ensure that.
4919 	 */
4920 	if ((asoc->sctp_cmt_on_off > 0) &&
4921 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4922 	    (cmt_dac_flag == 0)) {
4923 		this_sack_lowest_newack = cum_ack;
4924 	}
4925 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4926 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4927 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4928 	}
4929 	/* JRS - Use the congestion control given in the CC module */
4930 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4931 
4932 	/* Now are we exiting loss recovery ? */
4933 	if (will_exit_fast_recovery) {
4934 		/* Ok, we must exit fast recovery */
4935 		asoc->fast_retran_loss_recovery = 0;
4936 	}
4937 	if ((asoc->sat_t3_loss_recovery) &&
4938 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4939 		/* end satellite t3 loss recovery */
4940 		asoc->sat_t3_loss_recovery = 0;
4941 	}
4942 	/*
4943 	 * CMT Fast recovery
4944 	 */
4945 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4946 		if (net->will_exit_fast_recovery) {
4947 			/* Ok, we must exit fast recovery */
4948 			net->fast_retran_loss_recovery = 0;
4949 		}
4950 	}
4951 
4952 	/* Adjust and set the new rwnd value */
4953 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4954 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4955 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4956 	}
4957 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4958 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4959 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4960 		/* SWS sender side engages */
4961 		asoc->peers_rwnd = 0;
4962 	}
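	/*-
	 * Worked arithmetic (a sketch with made-up numbers): if the SACK
	 * advertised a_rwnd == 10000 while total_flight == 4000 with three
	 * chunks outstanding and sctp_peer_chunk_oh == 256, the peer can
	 * really take 10000 - (4000 + 3 * 256) == 5232 more bytes.  If the
	 * result drops below the sender-side SWS threshold, we treat it as
	 * zero to avoid silly window syndrome.
	 */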
4963 	if (asoc->peers_rwnd > old_rwnd) {
4964 		win_probe_recovery = 1;
4965 	}
4966 	/*
4967 	 * Now we must setup so we have a timer up for anyone with
4968 	 * outstanding data.
4969 	 */
4970 	done_once = 0;
4971 again:
4972 	j = 0;
4973 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4974 		if (win_probe_recovery && (net->window_probe)) {
4975 			win_probe_recovered = 1;
			/*-
			 * Find the first chunk that was used for a window
			 * probe and clear the event.  Put it back into the
			 * send queue as if it had not been sent.
			 */
4982 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4983 				if (tp1->window_probe) {
4984 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
4985 					break;
4986 				}
4987 			}
4988 		}
4989 		if (net->flight_size) {
4990 			j++;
4991 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4992 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4993 				    stcb->sctp_ep, stcb, net);
4994 			}
4995 			if (net->window_probe) {
4996 				net->window_probe = 0;
4997 			}
4998 		} else {
4999 			if (net->window_probe) {
				/*
				 * For window probes we must ensure that a
				 * timer is still running on the net.
				 */
5004 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5005 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5006 					    stcb->sctp_ep, stcb, net);
5007 
5008 				}
5009 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5010 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5011 				    stcb, net,
5012 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5013 			}
5014 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5015 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5016 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5017 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5018 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
5019 				}
5020 			}
5021 		}
5022 	}
5023 	if ((j == 0) &&
5024 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5025 	    (asoc->sent_queue_retran_cnt == 0) &&
5026 	    (win_probe_recovered == 0) &&
5027 	    (done_once == 0)) {
		/*
		 * Huh, this should not happen unless all packets are
		 * PR-SCTP and marked to be skipped, of course.
		 */
5032 		if (sctp_fs_audit(asoc)) {
5033 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5034 				net->flight_size = 0;
5035 			}
5036 			asoc->total_flight = 0;
5037 			asoc->total_flight_count = 0;
5038 			asoc->sent_queue_retran_cnt = 0;
5039 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5040 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5041 					sctp_flight_size_increase(tp1);
5042 					sctp_total_flight_increase(stcb, tp1);
5043 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5044 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5045 				}
5046 			}
5047 		}
5048 		done_once = 1;
5049 		goto again;
5050 	}
5051 	/*********************************************/
5052 	/* Here we perform PR-SCTP procedures        */
5053 	/* (section 4.2)                             */
5054 	/*********************************************/
5055 	/* C1. update advancedPeerAckPoint */
5056 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5057 		asoc->advanced_peer_ack_point = cum_ack;
5058 	}
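	/*-
	 * Worked example (a sketch): with cum_ack == 100 and TSNs 101-103
	 * abandoned via PR-SCTP, sctp_try_advance_peer_ack_point() below can
	 * move advancedPeerAckPoint to 103; since 103 > 100 (and 103 is past
	 * the old ack point), a FWD-TSN is sent so the peer skips ahead too.
	 */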
5059 	/* C2. try to further move advancedPeerAckPoint ahead */
5060 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5061 		struct sctp_tmit_chunk *lchk;
5062 		uint32_t old_adv_peer_ack_point;
5063 
5064 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5065 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5066 		/* C3. See if we need to send a Fwd-TSN */
5067 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5068 			/*
5069 			 * ISSUE with ECN, see FWD-TSN processing.
5070 			 */
5071 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5072 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5073 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5074 				    old_adv_peer_ack_point);
5075 			}
5076 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5077 				send_forward_tsn(stcb, asoc);
5078 			} else if (lchk) {
5079 				/* try to FR fwd-tsn's that get lost too */
5080 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5081 					send_forward_tsn(stcb, asoc);
5082 				}
5083 			}
5084 		}
5085 		if (lchk) {
5086 			/* Assure a timer is up */
5087 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5088 			    stcb->sctp_ep, stcb, lchk->whoTo);
5089 		}
5090 	}
5091 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5092 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5093 		    a_rwnd,
5094 		    stcb->asoc.peers_rwnd,
5095 		    stcb->asoc.total_flight,
5096 		    stcb->asoc.total_output_queue_size);
5097 	}
5098 }
5099 
5100 void
5101 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5102     struct sctp_nets *netp, int *abort_flag)
5103 {
5104 	/* Copy cum-ack */
5105 	uint32_t cum_ack, a_rwnd;
5106 
5107 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5108 	/* Arrange so a_rwnd does NOT change */
5109 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
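	/*-
	 * Sketch of why this keeps the rwnd stable (assuming the express
	 * handler recomputes peers_rwnd roughly as a_rwnd minus what is
	 * still in flight): with peers_rwnd == 8000 and total_flight == 2000
	 * we pass a_rwnd == 10000 and end up back at 8000.
	 */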
5110 
5111 	/* Now call the express sack handling */
5112 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5113 }
5114 
5115 static void
5116 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5117     struct sctp_stream_in *strmin)
5118 {
5119 	struct sctp_queued_to_read *ctl, *nctl;
5120 	struct sctp_association *asoc;
5121 	uint16_t tt;
5122 
5123 	asoc = &stcb->asoc;
5124 	tt = strmin->last_sequence_delivered;
	/*
	 * First deliver anything with a stream sequence number at or before
	 * the one that came in.
	 */
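	/*-
	 * SCTP_SSN_GE() compares 16-bit serial numbers (RFC 1982 style), so
	 * the test below survives SSN wraparound.  Worked example (a sketch):
	 * with tt == 1, an entry with sinfo_ssn == 65535 is still "at or
	 * before" tt, since 1 - 65535 == 2 (mod 2^16) places 65535 a short
	 * distance behind 1.
	 */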
5129 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5130 		if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
5131 			/* this is deliverable now */
5132 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5133 			/* subtract pending on streams */
5134 			asoc->size_on_all_streams -= ctl->length;
5135 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5136 			/* deliver it to at least the delivery-q */
5137 			if (stcb->sctp_socket) {
5138 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5139 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5140 				    ctl,
5141 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5142 			}
5143 		} else {
5144 			/* no more delivery now. */
5145 			break;
5146 		}
5147 	}
	/*
	 * Now deliver the things in the queue the normal (in-order) way, if
	 * any have become ready.  Since tt is a uint16_t, the increment
	 * wraps naturally at 65535.
	 */
5152 	tt = strmin->last_sequence_delivered + 1;
5153 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5154 		if (tt == ctl->sinfo_ssn) {
5155 			/* this is deliverable now */
5156 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5157 			/* subtract pending on streams */
5158 			asoc->size_on_all_streams -= ctl->length;
5159 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5160 			/* deliver it to at least the delivery-q */
5161 			strmin->last_sequence_delivered = ctl->sinfo_ssn;
5162 			if (stcb->sctp_socket) {
5163 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5164 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5165 				    ctl,
5166 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5167 
5168 			}
5169 			tt = strmin->last_sequence_delivered + 1;
5170 		} else {
5171 			break;
5172 		}
5173 	}
5174 }
5175 
5176 static void
5177 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5178     struct sctp_association *asoc,
5179     uint16_t stream, uint16_t seq)
5180 {
5181 	struct sctp_tmit_chunk *chk, *nchk;
5182 
5183 	/* For each one on here see if we need to toss it */
	/*
	 * For now, large messages held on the reasmqueue that are complete
	 * will be tossed too.  In theory we could do more work: spin through,
	 * stop after dumping one message (i.e., on seeing the start of a new
	 * message at the head) and call the delivery function to see whether
	 * it can be delivered.  But for now we just dump everything on the
	 * queue.
	 */
5192 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5193 		/*
5194 		 * Do not toss it if on a different stream or marked for
5195 		 * unordered delivery in which case the stream sequence
5196 		 * number has no meaning.
5197 		 */
5198 		if ((chk->rec.data.stream_number != stream) ||
5199 		    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5200 			continue;
5201 		}
5202 		if (chk->rec.data.stream_seq == seq) {
5203 			/* It needs to be tossed */
5204 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5205 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5206 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5207 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5208 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5209 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5210 			}
5211 			asoc->size_on_reasm_queue -= chk->send_size;
5212 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5213 
5214 			/* Clear up any stream problem */
5215 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5216 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
				/*
				 * We must move this stream's sequence number
				 * forward if the chunk being skipped is not
				 * unordered.  There is a chance that if the
				 * peer does not include the last fragment in
				 * its FWD-TSN we WILL have a problem here,
				 * since we would have a partial chunk in the
				 * queue that may not be deliverable.  Also,
				 * if a partial delivery API has started, the
				 * user may get a partial chunk and the next
				 * read would return a new chunk.  Really
				 * ugly, but I see no way around it!  Maybe a
				 * notify??
				 */
5231 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5232 			}
5233 			if (chk->data) {
5234 				sctp_m_freem(chk->data);
5235 				chk->data = NULL;
5236 			}
5237 			sctp_free_a_chunk(stcb, chk);
5238 		} else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
			/*
			 * If the stream_seq is greater than the one being
			 * purged, we are done.
			 */
5243 			break;
5244 		}
5245 	}
5246 }
5247 
5248 
5249 void
5250 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5251     struct sctp_forward_tsn_chunk *fwd,
5252     int *abort_flag, struct mbuf *m, int offset)
5253 {
5254 	/* The pr-sctp fwd tsn */
	/*
	 * Here we perform all the data receiver side steps for processing
	 * FwdTSN, as required by the pr-sctp draft:
	 *
	 * Assume we get FwdTSN(x):
	 *
	 * 1) update local cumTSN to x
	 * 2) try to further advance cumTSN to x + others we have
	 * 3) examine and update re-ordering queue on pr-in-streams
	 * 4) clean up re-assembly queue
	 * 5) Send a sack to report where we are.
	 */
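	/*-
	 * Worked example (a sketch): with cumulative_tsn == 100, receiving
	 * FwdTSN(x == 105) means the peer has abandoned TSNs up to 105, so
	 * we mark 101-105 in the (nr_)mapping array, advance cumulative_tsn
	 * to 105, purge any of those TSNs still sitting on the reassembly
	 * queue and bump the per-stream delivery points listed in the chunk.
	 */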
5266 	struct sctp_association *asoc;
5267 	uint32_t new_cum_tsn, gap;
5268 	unsigned int i, fwd_sz, cumack_set_flag, m_size;
5269 	uint32_t str_seq;
5270 	struct sctp_stream_in *strm;
5271 	struct sctp_tmit_chunk *chk, *nchk;
5272 	struct sctp_queued_to_read *ctl, *sv;
5273 
5274 	cumack_set_flag = 0;
5275 	asoc = &stcb->asoc;
5276 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5277 		SCTPDBG(SCTP_DEBUG_INDATA1,
		    "Bad size, fwd-tsn chunk too small\n");
5279 		return;
5280 	}
	/* number of TSNs the mapping array can describe (bytes << 3 = bits) */
	m_size = (stcb->asoc.mapping_array_size << 3);
5282 	/*************************************************************/
5283 	/* 1. Here we update local cumTSN and shift the bitmap array */
5284 	/*************************************************************/
5285 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5286 
5287 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5288 		/* Already got there ... */
5289 		return;
5290 	}
5291 	/*
5292 	 * now we know the new TSN is more advanced, let's find the actual
5293 	 * gap
5294 	 */
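	/*-
	 * The gap is the serial (mod 2^32) distance from the base of the
	 * mapping array to the new cumulative TSN.  Worked example (a
	 * sketch): with mapping_array_base_tsn == 0xfffffffe and
	 * new_cum_tsn == 3, the gap is 5, so the wrap across zero is handled
	 * correctly.
	 */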
5295 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5296 	asoc->cumulative_tsn = new_cum_tsn;
5297 	if (gap >= m_size) {
5298 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5299 			struct mbuf *oper;
5300 
			/*
			 * Out of range (more single-byte chunks than fit in
			 * the rwnd we give out).  This must be an attacker.
			 */
5305 			*abort_flag = 1;
5306 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5307 			    0, M_DONTWAIT, 1, MT_DATA);
5308 			if (oper) {
5309 				struct sctp_paramhdr *ph;
5310 				uint32_t *ippp;
5311 
5312 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5313 				    (sizeof(uint32_t) * 3);
5314 				ph = mtod(oper, struct sctp_paramhdr *);
5315 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5316 				ph->param_length = htons(SCTP_BUF_LEN(oper));
5317 				ippp = (uint32_t *) (ph + 1);
5318 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5319 				ippp++;
5320 				*ippp = asoc->highest_tsn_inside_map;
5321 				ippp++;
5322 				*ippp = new_cum_tsn;
5323 			}
5324 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5325 			sctp_abort_an_association(stcb->sctp_ep, stcb,
5326 			    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
5327 			return;
5328 		}
5329 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5330 
5331 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5332 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5333 		asoc->highest_tsn_inside_map = new_cum_tsn;
5334 
5335 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5336 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5337 
5338 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5339 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5340 		}
5341 	} else {
5342 		SCTP_TCB_LOCK_ASSERT(stcb);
5343 		for (i = 0; i <= gap; i++) {
5344 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5345 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5346 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5347 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5348 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5349 				}
5350 			}
5351 		}
5352 	}
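	/*-
	 * The (nr_)mapping arrays are plain bit maps over a byte array; a
	 * sketch of the macros used above (the real definitions live in
	 * sctp_constants.h):
	 *
	 *	present: array[gap >> 3] &  (1 << (gap & 7))
	 *	set:     array[gap >> 3] |= (1 << (gap & 7))
	 */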
5353 	/*************************************************************/
5354 	/* 2. Clear up re-assembly queue                             */
5355 	/*************************************************************/
	/*
	 * First service the reassembly queue if a pd-api is in progress,
	 * just in case we can move it forward.
	 */
5360 	if (asoc->fragmented_delivery_inprogress) {
5361 		sctp_service_reassembly(stcb, asoc);
5362 	}
5363 	/* For each one on here see if we need to toss it */
	/*
	 * For now, large messages held on the reasmqueue that are complete
	 * will be tossed too.  In theory we could do more work: spin through,
	 * stop after dumping one message (i.e., on seeing the start of a new
	 * message at the head) and call the delivery function to see whether
	 * it can be delivered.  But for now we just dump everything on the
	 * queue.
	 */
5372 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5373 		if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5374 			/* It needs to be tossed */
5375 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5376 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5377 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5378 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5379 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5380 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5381 			}
5382 			asoc->size_on_reasm_queue -= chk->send_size;
5383 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5384 
5385 			/* Clear up any stream problem */
5386 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5387 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
				/*
				 * We must move this stream's sequence number
				 * forward if the chunk being skipped is not
				 * unordered.  There is a chance that if the
				 * peer does not include the last fragment in
				 * its FWD-TSN we WILL have a problem here,
				 * since we would have a partial chunk in the
				 * queue that may not be deliverable.  Also,
				 * if a partial delivery API has started, the
				 * user may get a partial chunk and the next
				 * read would return a new chunk.  Really
				 * ugly, but I see no way around it!  Maybe a
				 * notify??
				 */
5402 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5403 			}
5404 			if (chk->data) {
5405 				sctp_m_freem(chk->data);
5406 				chk->data = NULL;
5407 			}
5408 			sctp_free_a_chunk(stcb, chk);
5409 		} else {
5410 			/*
5411 			 * Ok we have gone beyond the end of the fwd-tsn's
5412 			 * mark.
5413 			 */
5414 			break;
5415 		}
5416 	}
	/*******************************************************/
	/* 3. Update the PR-stream re-ordering queues and fix  */
	/*    delivery issues as needed.                        */
	/*******************************************************/
5421 	fwd_sz -= sizeof(*fwd);
5422 	if (m && fwd_sz) {
5423 		/* New method. */
5424 		unsigned int num_str;
5425 		struct sctp_strseq *stseq, strseqbuf;
5426 
5427 		offset += sizeof(*fwd);
5428 
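		/*-
		 * Beyond the fixed header, a FWD-TSN chunk carries a list of
		 * (stream, sequence) pairs, each a struct sctp_strseq of two
		 * 16-bit fields in network byte order; num_str below is how
		 * many such pairs fit in the remaining chunk length.
		 */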
5429 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5430 		num_str = fwd_sz / sizeof(struct sctp_strseq);
5431 		for (i = 0; i < num_str; i++) {
5434 			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5435 			    sizeof(struct sctp_strseq),
5436 			    (uint8_t *) & strseqbuf);
5437 			offset += sizeof(struct sctp_strseq);
5438 			if (stseq == NULL) {
5439 				break;
5440 			}
			/* Convert to host byte order */
			stseq->stream = ntohs(stseq->stream);
			stseq->sequence = ntohs(stseq->sequence);
5446 
5447 			/* now process */
5448 
			/*
			 * Now look on the read queue for the stream/seq that
			 * is not yet fully delivered.  If we find it, we
			 * transmute the read entry into a PDI_ABORTED.
			 */
5454 			if (stseq->stream >= asoc->streamincnt) {
5455 				/* screwed up streams, stop!  */
5456 				break;
5457 			}
5458 			if ((asoc->str_of_pdapi == stseq->stream) &&
5459 			    (asoc->ssn_of_pdapi == stseq->sequence)) {
5460 				/*
5461 				 * If this is the one we were partially
5462 				 * delivering now then we no longer are.
5463 				 * Note this will change with the reassembly
5464 				 * re-write.
5465 				 */
5466 				asoc->fragmented_delivery_inprogress = 0;
5467 			}
5468 			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5469 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5470 				if ((ctl->sinfo_stream == stseq->stream) &&
5471 				    (ctl->sinfo_ssn == stseq->sequence)) {
					/* pack stream (hi 16 bits) and SSN (lo 16 bits) */
					str_seq = (stseq->stream << 16) | stseq->sequence;
5473 					ctl->end_added = 1;
5474 					ctl->pdapi_aborted = 1;
5475 					sv = stcb->asoc.control_pdapi;
5476 					stcb->asoc.control_pdapi = ctl;
5477 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5478 					    stcb,
5479 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5480 					    (void *)&str_seq,
5481 					    SCTP_SO_NOT_LOCKED);
5482 					stcb->asoc.control_pdapi = sv;
5483 					break;
5484 				} else if ((ctl->sinfo_stream == stseq->stream) &&
5485 				    SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5486 					/* We are past our victim SSN */
5487 					break;
5488 				}
5489 			}
5490 			strm = &asoc->strmin[stseq->stream];
5491 			if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5492 				/* Update the sequence number */
5493 				strm->last_sequence_delivered = stseq->sequence;
5494 			}
5495 			/* now kick the stream the new way */
5496 			/* sa_ignore NO_NULL_CHK */
5497 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5498 		}
5499 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5500 	}
	/*
	 * Now slide the mapping arrays forward.
	 */
5504 	sctp_slide_mapping_arrays(stcb);
5505 
5506 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5507 		/* now lets kick out and check for more fragmented delivery */
5508 		/* sa_ignore NO_NULL_CHK */
5509 		sctp_deliver_reasm_check(stcb, &stcb->asoc);
5510 	}
5511 }
5512