/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $	 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>

/*
 * NOTES: On the outbound side of things we need to check the SACK timer to
 * see if a SACK should be generated into the chunk queue (only if there is
 * data to send, and it will be sent, so the SACK can be bundled with it).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-to-many socket, since
	 * sb_cc is the count that everyone has put up. When we rewrite
	 * sctp_soreceive we will fix this so that ONLY this association's
	 * data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * Take out what has NOT yet been put on the socket queue but that
	 * we still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));

	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* subtract the overhead of all these rwnd control messages */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to control overhead, reduce it
	 * to 1, even if it is 0. This keeps silly window syndrome (SWS)
	 * avoidance engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
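
/*
 * Worked example of the computation above (the numbers are illustrative
 * only): with 8000 bytes of free receive-buffer space, 2 chunks totalling
 * 1000 bytes on the reassembly queue, 1 chunk of 500 bytes on the stream
 * queues, and an mbuf size MSIZE of 256:
 *
 *	calc = 8000
 *	calc -= 1000 + 2 * 256		-> 6488
 *	calc -=  500 + 1 * 256		-> 5732
 *	calc -= my_rwnd_control_len
 *
 * and if the result fell below my_rwnd_control_len it would be clamped
 * to 1 so SWS avoidance stays engaged.
 */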

/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
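	/*
	 * Note: the receive path stores the DATA chunk flags in the upper
	 * byte of sinfo_flags (hence the shift below); consumers shift
	 * them back down when they need the chunk flags.
	 */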
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}

/*
 * Build out our readq entry based on the incoming packet.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}

struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}

	ret = sctp_get_mbuf_for_msg(len,
	    0, M_DONTWAIT, 1, MT_DATA);

	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		memcpy(outinfo, sinfo, len);
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	SCTP_BUF_LEN(ret) = cmh->cmsg_len;
	return (ret);
}
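
/*
 * Sketch of a hypothetical userland consumer of the control message built
 * above (not part of this file): after recvmsg(), walk the cmsg chain and
 * pick out the sndrcvinfo:
 *
 *	struct cmsghdr *scmh;
 *	struct sctp_sndrcvinfo *sinfo = NULL;
 *
 *	for (scmh = CMSG_FIRSTHDR(&msg); scmh != NULL;
 *	    scmh = CMSG_NXTHDR(&msg, scmh)) {
 *		if (scmh->cmsg_level == IPPROTO_SCTP &&
 *		    scmh->cmsg_type == SCTP_SNDRCV)
 *			sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(scmh);
 *	}
 *
 * Nothing is delivered unless SCTP_PCB_FLAGS_RECVDATAIOEVNT is enabled
 * (the feature test at the top of the function).
 */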
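
/*
 * Move a delivered TSN from the renegable mapping array to the
 * non-renegable one: set its bit in nr_mapping_array, clear it in
 * mapping_array, and update the highest-TSN trackers. When the
 * sctp_do_drain sysctl is off this is a no-op (presumably because
 * nothing is ever renegable in that configuration).
 */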
static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one mapping array to the
		 * other.
		 */
		return;
	}
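	/*
	 * SCTP_CALC_TSN_TO_GAP yields gap = tsn - mapping_array_base_tsn
	 * in serial (mod 2^32) arithmetic, so the result is a valid bit
	 * index into the mapping arrays even after the TSN space wraps.
	 */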
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		printf("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
		panic("Things are really messed up now!!");
#endif
	}
	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}


/*
 * We are delivering currently from the reassembly queue. We must continue
 * to deliver until we either: 1) run out of space, 2) run out of sequential
 * TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *nchk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;
	struct sctp_queued_to_read *control, *ctl, *nctl;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket above is long gone or going.. */
abandon:
		asoc->fragmented_delivery_inprogress = 0;
		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			/* sa_ignore FREED_MEMORY */
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * This is an ordered chunk but not the next one to
			 * deliver in its stream
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {

			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end,
			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong, either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is an EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was an EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");

#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* pull it off; we delivered it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now let's see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
				/* Deliver more if we can. */
				if (nxt_todel == ctl->sinfo_ssn) {
					TAILQ_REMOVE(&strm->inqueue, ctl, next);
					asoc->size_on_all_streams -= ctl->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					strm->last_sequence_delivered++;
					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					    ctl,
					    &stcb->sctp_socket->so_rcv, 1,
					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
				} else {
					break;
				}
				nxt_todel = strm->last_sequence_delivered + 1;
			}
			break;
		}
	}
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we run out of in-order data.
 * One big question still remains: what to do when the socket buffer is
 * FULL?
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 *
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *oper;

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream,
	    (uint32_t) strm->last_sequence_delivered,
	    (uint32_t) nxt_todel);
	if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort  association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (sizeof(uint32_t) * 3);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
			ippp++;
			*ippp = control->sinfo_tsn;
			ippp++;
			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

		*abort_flag = 1;
		return;

	}
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY: it won't be queued if it can be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;

		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_NOT_LOCKED);
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, He sent me a duplicate str
					 * seq number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSN's have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh for now just blow
					 * away the chunk!
					 */

					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom) {
						sctp_free_remote_addr(control->whoFrom);
						control->whoFrom = NULL;
					}
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}

/*
 * Returns two things: via *t_size, the total size of the deliverable parts
 * of the first fragmented message on the reassembly queue; and 1 if all of
 * the message is ready, or 0 if the message is still incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
	}
	return (0);
}

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize, pd_point;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep, the first one is here and it's ok to
			 * deliver, but should we?
			 */
			if (stcb->sctp_socket) {
				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
				    stcb->sctp_ep->partial_delivery_point);
			} else {
				pd_point = stcb->sctp_ep->partial_delivery_point;
			}
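			/*
			 * Deliver when either the whole message is on the
			 * queue or the deliverable prefix has reached the
			 * partial delivery point computed above.
			 */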
			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {

				/*
				 * Yes, we set up to start reception by
				 * backing down the TSN just in case we
				 * can't deliver all of it.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery... but it won't know to go
		 * back and call itself again... we do that here with the
		 * goto doit_again.
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our fragmented delivery, could be more
			 * waiting?
			 */
			goto doit_again;
		}
	}
}

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
	u_char last_flags;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery in progress;
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);

				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (sizeof(uint32_t) * 3);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment NOT a FIRST
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (3 * sizeof(uint32_t));
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
					    chk->rec.data.stream_number,
					    asoc->str_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (sizeof(uint32_t) * 3);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
					    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
					    chk->rec.data.stream_seq,
					    asoc->ssn_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, He sent me a duplicate str seq number */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSN's have wrapped. Maybe I should
			 * compare to TSN somehow... sigh for now just blow
			 * away the chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			return;
		} else {
			last_flags = at->rec.data.rcv_flags;
			last_tsn = at->rec.data.TSN_seq;
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits: check fragment flags against the TSN neighbors */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
1008 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
1009 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
1010 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1011 					    0, M_DONTWAIT, 1, MT_DATA);
1012 					if (oper) {
1013 						struct sctp_paramhdr *ph;
1014 						uint32_t *ippp;
1015 
1016 						SCTP_BUF_LEN(oper) =
1017 						    sizeof(struct sctp_paramhdr) +
1018 						    (3 * sizeof(uint32_t));
1019 						ph = mtod(oper,
1020 						    struct sctp_paramhdr *);
1021 						ph->param_type =
1022 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1023 						ph->param_length =
1024 						    htons(SCTP_BUF_LEN(oper));
1025 						ippp = (uint32_t *) (ph + 1);
1026 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1027 						ippp++;
1028 						*ippp = chk->rec.data.TSN_seq;
1029 						ippp++;
1030 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1031 
1032 					}
1033 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1034 					sctp_abort_an_association(stcb->sctp_ep,
1035 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1036 					*abort_flag = 1;
1037 					return;
1038 				}
1039 				if (chk->rec.data.stream_number !=
1040 				    prev->rec.data.stream_number) {
1041 					/*
1042 					 * Huh, need the correct STR here,
1043 					 * they must be the same.
1044 					 */
1045 					SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1046 					    chk->rec.data.stream_number,
1047 					    prev->rec.data.stream_number);
1048 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1049 					    0, M_DONTWAIT, 1, MT_DATA);
1050 					if (oper) {
1051 						struct sctp_paramhdr *ph;
1052 						uint32_t *ippp;
1053 
1054 						SCTP_BUF_LEN(oper) =
1055 						    sizeof(struct sctp_paramhdr) +
1056 						    (3 * sizeof(uint32_t));
1057 						ph = mtod(oper,
1058 						    struct sctp_paramhdr *);
1059 						ph->param_type =
1060 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1061 						ph->param_length =
1062 						    htons(SCTP_BUF_LEN(oper));
1063 						ippp = (uint32_t *) (ph + 1);
1064 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1065 						ippp++;
1066 						*ippp = chk->rec.data.TSN_seq;
1067 						ippp++;
1068 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1069 					}
1070 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1071 					sctp_abort_an_association(stcb->sctp_ep,
1072 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1073 
1074 					*abort_flag = 1;
1075 					return;
1076 				}
1077 				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1078 				    chk->rec.data.stream_seq !=
1079 				    prev->rec.data.stream_seq) {
1080 					/*
1081 					 * Huh, need the correct STR here,
1082 					 * they must be the same.
1083 					 */
1084 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1085 					    chk->rec.data.stream_seq,
1086 					    prev->rec.data.stream_seq);
1087 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1088 					    0, M_DONTWAIT, 1, MT_DATA);
1089 					if (oper) {
1090 						struct sctp_paramhdr *ph;
1091 						uint32_t *ippp;
1092 
1093 						SCTP_BUF_LEN(oper) =
1094 						    sizeof(struct sctp_paramhdr) +
1095 						    (3 * sizeof(uint32_t));
1096 						ph = mtod(oper,
1097 						    struct sctp_paramhdr *);
1098 						ph->param_type =
1099 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1100 						ph->param_length =
1101 						    htons(SCTP_BUF_LEN(oper));
1102 						ippp = (uint32_t *) (ph + 1);
1103 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1104 						ippp++;
1105 						*ippp = chk->rec.data.TSN_seq;
1106 						ippp++;
1107 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1108 					}
1109 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1110 					sctp_abort_an_association(stcb->sctp_ep,
1111 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1112 
1113 					*abort_flag = 1;
1114 					return;
1115 				}
1116 			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1117 			    SCTP_DATA_LAST_FRAG) {
1118 				/* Insert chk MUST be a FIRST */
1119 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1120 				    SCTP_DATA_FIRST_FRAG) {
1121 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1122 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1123 					    0, M_DONTWAIT, 1, MT_DATA);
1124 					if (oper) {
1125 						struct sctp_paramhdr *ph;
1126 						uint32_t *ippp;
1127 
1128 						SCTP_BUF_LEN(oper) =
1129 						    sizeof(struct sctp_paramhdr) +
1130 						    (3 * sizeof(uint32_t));
1131 						ph = mtod(oper,
1132 						    struct sctp_paramhdr *);
1133 						ph->param_type =
1134 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1135 						ph->param_length =
1136 						    htons(SCTP_BUF_LEN(oper));
1137 						ippp = (uint32_t *) (ph + 1);
1138 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1139 						ippp++;
1140 						*ippp = chk->rec.data.TSN_seq;
1141 						ippp++;
1142 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1143 
1144 					}
1145 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1146 					sctp_abort_an_association(stcb->sctp_ep,
1147 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1148 
1149 					*abort_flag = 1;
1150 					return;
1151 				}
1152 			}
1153 		}
1154 	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_MIDDLE_FRAG ||
				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    next->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    next->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is not
 * stuffing us full of bad packet fragments: it returns 1 if the given TSN
 * would have to be part of a fragmented message already on the reassembly
 * queue. A broken peer could also trigger this, but that is doubtful. It is
 * too bad I must worry about evil crackers sigh :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok this guy belongs next to a guy
					 * that is NOT last, it should be a
					 * middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since it's a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than the new chunk, but does
			 * it need to be right before it?
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, It better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}


static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
	/* Process a data chunk */
	struct sctp_tmit_chunk *chk;
	uint32_t tsn, gap;
	struct mbuf *dmbuf;
	int indx, the_len;
	int need_reasm_check = 0;
	uint16_t strmno, strmseq;
	struct mbuf *oper;
	struct sctp_queued_to_read *control;
	int ordered;
	uint32_t protocol_id;
	uint8_t chunk_flags;
	struct sctp_stream_reset_list *liste;

	chk = NULL;
	tsn = ntohl(ch->dp.tsn);
	chunk_flags = ch->ch.chunk_flags;
	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
		asoc->send_sack = 1;
	}
	protocol_id = ch->dp.protocol_id;
	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
	}
	if (stcb == NULL) {
		return (0);
	}
	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
		/* It is a duplicate */
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/* Calculate the number of TSN's between the base and this TSN */
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
		/* Can't hold the bit in the mapping at max array, toss it */
		return (0);
	}
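	/*
	 * gap is a bit index into the mapping arrays, so the hard ceiling
	 * above is SCTP_MAPPING_ARRAY bytes times 8 bits; below that, the
	 * array is grown on demand to cover the gap.
	 */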
	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_expand_mapping_array(asoc, gap)) {
			/* Can't expand, drop it */
			return (0);
		}
	}
	if (SCTP_TSN_GT(tsn, *high_tsn)) {
		*high_tsn = tsn;
	}
	/* See if we have received this one already */
	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/*
	 * Check to see about the GONE flag, duplicates would cause a sack
	 * to be sent up above
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
	    ) {
		/*
		 * wait a minute, this guy is gone, there is no longer a
		 * receiver. Send peer an ABORT!
		 */
		struct mbuf *op_err;

		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
	/*
	 * Now before going further we see if there is room. If NOT then we
	 * MAY let one through only IF this TSN is the one we are waiting
	 * for on a partial delivery API.
	 */

	/* now do the tests */
	if (((asoc->cnt_on_all_streams +
	    asoc->cnt_on_reasm_queue +
	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
	    (((int)asoc->my_rwnd) <= 0)) {
		/*
		 * When we have NO room in the rwnd we check to make sure
		 * the reader is doing its job...
		 */
		if (stcb->sctp_socket->so_rcv.sb_cc) {
			/* some to read, wake-up */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		/* now is it in the mapping array of what we have accepted? */
		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
		    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
			/* Nope not in the valid range dump it */
			sctp_set_rwnd(stcb, asoc);
			if ((asoc->cnt_on_all_streams +
			    asoc->cnt_on_reasm_queue +
			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
				SCTP_STAT_INCR(sctps_datadropchklmt);
			} else {
				SCTP_STAT_INCR(sctps_datadroprwnd);
			}
			indx = *break_flag;
			*break_flag = 1;
			return (0);
		}
	}
	strmno = ntohs(ch->dp.stream_id);
	if (strmno >= asoc->streamincnt) {
		struct sctp_paramhdr *phdr;
		struct mbuf *mb;

		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (mb != NULL) {
			/* add some space up front so prepend will work well */
			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
			phdr = mtod(mb, struct sctp_paramhdr *);
			/*
			 * Error causes are just params, and this one has
			 * two back-to-back phdrs: one with the error type
			 * and size, the other with the stream id and a
			 * reserved field.
			 */
1549 			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1550 			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1551 			phdr->param_length =
1552 			    htons(sizeof(struct sctp_paramhdr) * 2);
1553 			phdr++;
1554 			/* We insert the stream in the type field */
1555 			phdr->param_type = ch->dp.stream_id;
1556 			/* And set the length to 0 for the rsvd field */
1557 			phdr->param_length = 0;
1558 			sctp_queue_op_err(stcb, mb);
1559 		}
1560 		SCTP_STAT_INCR(sctps_badsid);
1561 		SCTP_TCB_LOCK_ASSERT(stcb);
1562 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1563 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1564 			asoc->highest_tsn_inside_nr_map = tsn;
1565 		}
1566 		if (tsn == (asoc->cumulative_tsn + 1)) {
1567 			/* Update cum-ack */
1568 			asoc->cumulative_tsn = tsn;
1569 		}
1570 		return (0);
1571 	}
1572 	/*
1573 	 * Before we continue let's validate that we are not being fooled by
1574 	 * an evil attacker. We can have at most 4k chunks outstanding, based
1575 	 * on the TSN spread allowed by the mapping array (512 * 8 bits), so
1576 	 * there is no way our stream sequence numbers could have wrapped. We
1577 	 * of course only validate the FIRST fragment, so the bit must be set.
1578 	 */
1579 	strmseq = ntohs(ch->dp.stream_sequence);
1580 #ifdef SCTP_ASOCLOG_OF_TSNS
1581 	SCTP_TCB_LOCK_ASSERT(stcb);
1582 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1583 		asoc->tsn_in_at = 0;
1584 		asoc->tsn_in_wrapped = 1;
1585 	}
1586 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1587 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1588 	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1589 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1590 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1591 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1592 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1593 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1594 	asoc->tsn_in_at++;
1595 #endif
1596 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1597 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1598 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1599 	    SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1600 		/* The incoming sseq is behind where we last delivered? */
1601 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1602 		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1603 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1604 		    0, M_DONTWAIT, 1, MT_DATA);
1605 		if (oper) {
1606 			struct sctp_paramhdr *ph;
1607 			uint32_t *ippp;
1608 
1609 			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1610 			    (3 * sizeof(uint32_t));
1611 			ph = mtod(oper, struct sctp_paramhdr *);
1612 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1613 			ph->param_length = htons(SCTP_BUF_LEN(oper));
1614 			ippp = (uint32_t *) (ph + 1);
1615 			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1616 			ippp++;
1617 			*ippp = tsn;
1618 			ippp++;
1619 			*ippp = ((strmno << 16) | strmseq);
1620 
1621 		}
1622 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1623 		sctp_abort_an_association(stcb->sctp_ep, stcb,
1624 		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1625 		*abort_flag = 1;
1626 		return (0);
1627 	}
1628 	/************************************
1629 	 * From here down we may find ch-> invalid,
1630 	 * so it's a good idea NOT to use it.
1631 	 *************************************/
1632 
1633 	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1634 	if (last_chunk == 0) {
1635 		dmbuf = SCTP_M_COPYM(*m,
1636 		    (offset + sizeof(struct sctp_data_chunk)),
1637 		    the_len, M_DONTWAIT);
1638 #ifdef SCTP_MBUF_LOGGING
1639 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1640 			struct mbuf *mat;
1641 
1642 			mat = dmbuf;
1643 			while (mat) {
1644 				if (SCTP_BUF_IS_EXTENDED(mat)) {
1645 					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1646 				}
1647 				mat = SCTP_BUF_NEXT(mat);
1648 			}
1649 		}
1650 #endif
1651 	} else {
1652 		/* We can steal the last chunk */
1653 		int l_len;
1654 
1655 		dmbuf = *m;
1656 		/* lop off the top part */
1657 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1658 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1659 			l_len = SCTP_BUF_LEN(dmbuf);
1660 		} else {
1661 			/*
1662 			 * need to count up the size; hopefully we do not
1663 			 * hit this too often :-0
1664 			 */
1665 			struct mbuf *lat;
1666 
1667 			l_len = 0;
1668 			lat = dmbuf;
1669 			while (lat) {
1670 				l_len += SCTP_BUF_LEN(lat);
1671 				lat = SCTP_BUF_NEXT(lat);
1672 			}
1673 		}
1674 		if (l_len > the_len) {
1675 			/* Trim the rounding bytes off the end too */
1676 			m_adj(dmbuf, -(l_len - the_len));
1677 		}
1678 	}
1679 	if (dmbuf == NULL) {
1680 		SCTP_STAT_INCR(sctps_nomem);
1681 		return (0);
1682 	}
1683 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1684 	    asoc->fragmented_delivery_inprogress == 0 &&
1685 	    TAILQ_EMPTY(&asoc->resetHead) &&
1686 	    ((ordered == 0) ||
1687 	    ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1688 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1689 		/* Candidate for express delivery */
1690 		/*
1691 		 * It's not fragmented, no PD-API is up, nothing is in the
1692 		 * delivery queue, it's un-ordered OR ordered and the next to
1693 		 * deliver AND nothing else is stuck on the stream queue,
1694 		 * and there is room for it in the socket buffer. Let's just
1695 		 * stuff it up the buffer....
1696 		 */
1697 
1698 		/* It would be nice to avoid this copy if we could :< */
1699 		sctp_alloc_a_readq(stcb, control);
1700 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1701 		    protocol_id,
1702 		    stcb->asoc.context,
1703 		    strmno, strmseq,
1704 		    chunk_flags,
1705 		    dmbuf);
1706 		if (control == NULL) {
1707 			goto failed_express_del;
1708 		}
1709 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1710 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1711 			asoc->highest_tsn_inside_nr_map = tsn;
1712 		}
1713 		sctp_add_to_readq(stcb->sctp_ep, stcb,
1714 		    control, &stcb->sctp_socket->so_rcv,
1715 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1716 
1717 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1718 			/* for ordered, bump what we delivered */
1719 			asoc->strmin[strmno].last_sequence_delivered++;
1720 		}
1721 		SCTP_STAT_INCR(sctps_recvexpress);
1722 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1723 			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1724 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1725 		}
1726 		control = NULL;
1727 
1728 		goto finish_express_del;
1729 	}
1730 failed_express_del:
1731 	/* If we reach here this is a new chunk */
1732 	chk = NULL;
1733 	control = NULL;
1734 	/* Express for fragmented delivery? */
1735 	if ((asoc->fragmented_delivery_inprogress) &&
1736 	    (stcb->asoc.control_pdapi) &&
1737 	    (asoc->str_of_pdapi == strmno) &&
1738 	    (asoc->ssn_of_pdapi == strmseq)
1739 	    ) {
1740 		control = stcb->asoc.control_pdapi;
1741 		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1742 			/* Can't be another first? */
1743 			goto failed_pdapi_express_del;
1744 		}
1745 		if (tsn == (control->sinfo_tsn + 1)) {
1746 			/* Yep, we can add it on */
1747 			int end = 0;
1748 			uint32_t cumack;
1749 
1750 			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1751 				end = 1;
1752 			}
1753 			cumack = asoc->cumulative_tsn;
1754 			if ((cumack + 1) == tsn)
1755 				cumack = tsn;
1756 
1757 			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1758 			    tsn,
1759 			    &stcb->sctp_socket->so_rcv)) {
1760 				SCTP_PRINTF("Append fails end:%d\n", end);
1761 				goto failed_pdapi_express_del;
1762 			}
1763 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1764 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1765 				asoc->highest_tsn_inside_nr_map = tsn;
1766 			}
1767 			SCTP_STAT_INCR(sctps_recvexpressm);
1768 			control->sinfo_tsn = tsn;
1769 			asoc->tsn_last_delivered = tsn;
1770 			asoc->fragment_flags = chunk_flags;
1771 			asoc->tsn_of_pdapi_last_delivered = tsn;
1772 			asoc->last_flags_delivered = chunk_flags;
1773 			asoc->last_strm_seq_delivered = strmseq;
1774 			asoc->last_strm_no_delivered = strmno;
1775 			if (end) {
1776 				/* clean up the flags and such */
1777 				asoc->fragmented_delivery_inprogress = 0;
1778 				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1779 					asoc->strmin[strmno].last_sequence_delivered++;
1780 				}
1781 				stcb->asoc.control_pdapi = NULL;
1782 				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1783 					/*
1784 					 * There could be another message
1785 					 * ready
1786 					 */
1787 					need_reasm_check = 1;
1788 				}
1789 			}
1790 			control = NULL;
1791 			goto finish_express_del;
1792 		}
1793 	}
1794 failed_pdapi_express_del:
1795 	control = NULL;
1796 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1797 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1798 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1799 			asoc->highest_tsn_inside_nr_map = tsn;
1800 		}
1801 	} else {
1802 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1803 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1804 			asoc->highest_tsn_inside_map = tsn;
1805 		}
1806 	}
1807 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1808 		sctp_alloc_a_chunk(stcb, chk);
1809 		if (chk == NULL) {
1810 			/* No memory so we drop the chunk */
1811 			SCTP_STAT_INCR(sctps_nomem);
1812 			if (last_chunk == 0) {
1813 				/* we copied it, free the copy */
1814 				sctp_m_freem(dmbuf);
1815 			}
1816 			return (0);
1817 		}
1818 		chk->rec.data.TSN_seq = tsn;
1819 		chk->no_fr_allowed = 0;
1820 		chk->rec.data.stream_seq = strmseq;
1821 		chk->rec.data.stream_number = strmno;
1822 		chk->rec.data.payloadtype = protocol_id;
1823 		chk->rec.data.context = stcb->asoc.context;
1824 		chk->rec.data.doing_fast_retransmit = 0;
1825 		chk->rec.data.rcv_flags = chunk_flags;
1826 		chk->asoc = asoc;
1827 		chk->send_size = the_len;
1828 		chk->whoTo = net;
1829 		atomic_add_int(&net->ref_count, 1);
1830 		chk->data = dmbuf;
1831 	} else {
1832 		sctp_alloc_a_readq(stcb, control);
1833 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1834 		    protocol_id,
1835 		    stcb->asoc.context,
1836 		    strmno, strmseq,
1837 		    chunk_flags,
1838 		    dmbuf);
1839 		if (control == NULL) {
1840 			/* No memory so we drop the chunk */
1841 			SCTP_STAT_INCR(sctps_nomem);
1842 			if (last_chunk == 0) {
1843 				/* we copied it, free the copy */
1844 				sctp_m_freem(dmbuf);
1845 			}
1846 			return (0);
1847 		}
1848 		control->length = the_len;
1849 	}
1850 
1851 	/* Mark it as received */
1852 	/* Now queue it where it belongs */
1853 	if (control != NULL) {
1854 		/* First a sanity check */
1855 		if (asoc->fragmented_delivery_inprogress) {
1856 			/*
1857 			 * Ok, we have a fragmented delivery in progress. If
1858 			 * this chunk is next to deliver, or belongs in our
1859 			 * view to the reassembly queue, the peer is evil or
1860 			 * broken.
1861 			 */
1862 			uint32_t estimate_tsn;
1863 
1864 			estimate_tsn = asoc->tsn_last_delivered + 1;
1865 			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1866 			    (estimate_tsn == control->sinfo_tsn)) {
1867 				/* Evil/Broken peer */
1868 				sctp_m_freem(control->data);
1869 				control->data = NULL;
1870 				if (control->whoFrom) {
1871 					sctp_free_remote_addr(control->whoFrom);
1872 					control->whoFrom = NULL;
1873 				}
1874 				sctp_free_a_readq(stcb, control);
1875 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1876 				    0, M_DONTWAIT, 1, MT_DATA);
1877 				if (oper) {
1878 					struct sctp_paramhdr *ph;
1879 					uint32_t *ippp;
1880 
1881 					SCTP_BUF_LEN(oper) =
1882 					    sizeof(struct sctp_paramhdr) +
1883 					    (3 * sizeof(uint32_t));
1884 					ph = mtod(oper, struct sctp_paramhdr *);
1885 					ph->param_type =
1886 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1887 					ph->param_length = htons(SCTP_BUF_LEN(oper));
1888 					ippp = (uint32_t *) (ph + 1);
1889 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1890 					ippp++;
1891 					*ippp = tsn;
1892 					ippp++;
1893 					*ippp = ((strmno << 16) | strmseq);
1894 				}
1895 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1896 				sctp_abort_an_association(stcb->sctp_ep, stcb,
1897 				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1898 
1899 				*abort_flag = 1;
1900 				return (0);
1901 			} else {
1902 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1903 					sctp_m_freem(control->data);
1904 					control->data = NULL;
1905 					if (control->whoFrom) {
1906 						sctp_free_remote_addr(control->whoFrom);
1907 						control->whoFrom = NULL;
1908 					}
1909 					sctp_free_a_readq(stcb, control);
1910 
1911 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1912 					    0, M_DONTWAIT, 1, MT_DATA);
1913 					if (oper) {
1914 						struct sctp_paramhdr *ph;
1915 						uint32_t *ippp;
1916 
1917 						SCTP_BUF_LEN(oper) =
1918 						    sizeof(struct sctp_paramhdr) +
1919 						    (3 * sizeof(uint32_t));
1920 						ph = mtod(oper,
1921 						    struct sctp_paramhdr *);
1922 						ph->param_type =
1923 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1924 						ph->param_length =
1925 						    htons(SCTP_BUF_LEN(oper));
1926 						ippp = (uint32_t *) (ph + 1);
1927 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1928 						ippp++;
1929 						*ippp = tsn;
1930 						ippp++;
1931 						*ippp = ((strmno << 16) | strmseq);
1932 					}
1933 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1934 					sctp_abort_an_association(stcb->sctp_ep,
1935 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1936 
1937 					*abort_flag = 1;
1938 					return (0);
1939 				}
1940 			}
1941 		} else {
1942 			/* No PDAPI running */
1943 			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1944 				/*
1945 				 * The reassembly queue is NOT empty; validate
1946 				 * that this TSN does not belong in the
1947 				 * reassembly queue. If it does, then our peer
1948 				 * is broken or evil.
1949 				 */
1950 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1951 					sctp_m_freem(control->data);
1952 					control->data = NULL;
1953 					if (control->whoFrom) {
1954 						sctp_free_remote_addr(control->whoFrom);
1955 						control->whoFrom = NULL;
1956 					}
1957 					sctp_free_a_readq(stcb, control);
1958 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1959 					    0, M_DONTWAIT, 1, MT_DATA);
1960 					if (oper) {
1961 						struct sctp_paramhdr *ph;
1962 						uint32_t *ippp;
1963 
1964 						SCTP_BUF_LEN(oper) =
1965 						    sizeof(struct sctp_paramhdr) +
1966 						    (3 * sizeof(uint32_t));
1967 						ph = mtod(oper,
1968 						    struct sctp_paramhdr *);
1969 						ph->param_type =
1970 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1971 						ph->param_length =
1972 						    htons(SCTP_BUF_LEN(oper));
1973 						ippp = (uint32_t *) (ph + 1);
1974 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
1975 						ippp++;
1976 						*ippp = tsn;
1977 						ippp++;
1978 						*ippp = ((strmno << 16) | strmseq);
1979 					}
1980 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1981 					sctp_abort_an_association(stcb->sctp_ep,
1982 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1983 
1984 					*abort_flag = 1;
1985 					return (0);
1986 				}
1987 			}
1988 		}
1989 		/* ok, if we reach here we have passed the sanity checks */
1990 		if (chunk_flags & SCTP_DATA_UNORDERED) {
1991 			/* queue directly into socket buffer */
1992 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1993 			sctp_add_to_readq(stcb->sctp_ep, stcb,
1994 			    control,
1995 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1996 		} else {
1997 			/*
1998 			 * Special check for when streams are resetting. We
1999 			 * could be smarter about this and check the actual
2000 			 * stream to see if it is not being reset... that way
2001 			 * we would not create a HOLB between streams being
2002 			 * reset and those not being reset.
2003 			 *
2004 			 * We take complete messages that have a stream reset
2005 			 * intervening (aka the TSN is after where our
2006 			 * cum-ack needs to be) off and put them on a
2007 			 * pending_reply_queue. The reassembly ones we do
2008 			 * not have to worry about since they are all sorted
2009 			 * and processed by TSN order. It is only the
2010 			 * singletons I must worry about.
2011 			 */
2012 			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2013 			    SCTP_TSN_GT(tsn, liste->tsn)) {
2014 				/*
2015 				 * yep, it's past where we need to reset... go
2016 				 * ahead and queue it.
2017 				 */
2018 				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2019 					/* first one on */
2020 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2021 				} else {
2022 					struct sctp_queued_to_read *ctlOn,
2023 					                   *nctlOn;
2024 					unsigned char inserted = 0;
2025 
2026 					TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2027 						if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2028 							continue;
2029 						} else {
2030 							/* found it */
2031 							TAILQ_INSERT_BEFORE(ctlOn, control, next);
2032 							inserted = 1;
2033 							break;
2034 						}
2035 					}
2036 					if (inserted == 0) {
2037 						/*
2038 						 * not inserted before any
2039 						 * existing entry, so it
2040 						 * must go at the end.
2041 						 */
2042 						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2043 					}
2044 				}
2045 			} else {
2046 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2047 				if (*abort_flag) {
2048 					return (0);
2049 				}
2050 			}
2051 		}
2052 	} else {
2053 		/* Into the re-assembly queue */
2054 		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2055 		if (*abort_flag) {
2056 			/*
2057 			 * the assoc is now gone and chk was put onto the
2058 			 * reasm queue, which has all been freed.
2059 			 */
2060 			*m = NULL;
2061 			return (0);
2062 		}
2063 	}
2064 finish_express_del:
2065 	if (tsn == (asoc->cumulative_tsn + 1)) {
2066 		/* Update cum-ack */
2067 		asoc->cumulative_tsn = tsn;
2068 	}
2069 	if (last_chunk) {
2070 		*m = NULL;
2071 	}
2072 	if (ordered) {
2073 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2074 	} else {
2075 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2076 	}
2077 	SCTP_STAT_INCR(sctps_recvdata);
2078 	/* Set it present please */
2079 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2080 		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2081 	}
2082 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2083 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2084 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2085 	}
2086 	/* check the special flag for stream resets */
2087 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2088 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2089 		/*
2090 		 * we have finished working through the backlogged TSNs; now it
2091 		 * is time to reset streams: 1) call the reset function, 2) free
2092 		 * the pending_reply space, 3) distribute any chunks in the
2093 		 * pending_reply_queue.
2094 		 */
2095 		struct sctp_queued_to_read *ctl, *nctl;
2096 
2097 		sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2098 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2099 		SCTP_FREE(liste, SCTP_M_STRESET);
2100 		/* sa_ignore FREED_MEMORY */
2101 		liste = TAILQ_FIRST(&asoc->resetHead);
2102 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2103 			/* All can be removed */
2104 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2105 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2106 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2107 				if (*abort_flag) {
2108 					return (0);
2109 				}
2110 			}
2111 		} else {
2112 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2113 				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2114 					break;
2115 				}
2116 				/*
2117 				 * if ctl->sinfo_tsn is <= liste->tsn we can
2118 				 * process it, which is the negation of the
2119 				 * break test ctl->sinfo_tsn > liste->tsn.
2120 				 */
2121 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2122 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2123 				if (*abort_flag) {
2124 					return (0);
2125 				}
2126 			}
2127 		}
2128 		/*
2129 		 * Now service reassembly to pick up anything that has been
2130 		 * held on the reassembly queue.
2131 		 */
2132 		sctp_deliver_reasm_check(stcb, asoc);
2133 		need_reasm_check = 0;
2134 	}
2135 	if (need_reasm_check) {
2136 		/* Another one waits ? */
2137 		sctp_deliver_reasm_check(stcb, asoc);
2138 	}
2139 	return (1);
2140 }
2141 
2142 int8_t sctp_map_lookup_tab[256] = {
2143 	0, 1, 0, 2, 0, 1, 0, 3,
2144 	0, 1, 0, 2, 0, 1, 0, 4,
2145 	0, 1, 0, 2, 0, 1, 0, 3,
2146 	0, 1, 0, 2, 0, 1, 0, 5,
2147 	0, 1, 0, 2, 0, 1, 0, 3,
2148 	0, 1, 0, 2, 0, 1, 0, 4,
2149 	0, 1, 0, 2, 0, 1, 0, 3,
2150 	0, 1, 0, 2, 0, 1, 0, 6,
2151 	0, 1, 0, 2, 0, 1, 0, 3,
2152 	0, 1, 0, 2, 0, 1, 0, 4,
2153 	0, 1, 0, 2, 0, 1, 0, 3,
2154 	0, 1, 0, 2, 0, 1, 0, 5,
2155 	0, 1, 0, 2, 0, 1, 0, 3,
2156 	0, 1, 0, 2, 0, 1, 0, 4,
2157 	0, 1, 0, 2, 0, 1, 0, 3,
2158 	0, 1, 0, 2, 0, 1, 0, 7,
2159 	0, 1, 0, 2, 0, 1, 0, 3,
2160 	0, 1, 0, 2, 0, 1, 0, 4,
2161 	0, 1, 0, 2, 0, 1, 0, 3,
2162 	0, 1, 0, 2, 0, 1, 0, 5,
2163 	0, 1, 0, 2, 0, 1, 0, 3,
2164 	0, 1, 0, 2, 0, 1, 0, 4,
2165 	0, 1, 0, 2, 0, 1, 0, 3,
2166 	0, 1, 0, 2, 0, 1, 0, 6,
2167 	0, 1, 0, 2, 0, 1, 0, 3,
2168 	0, 1, 0, 2, 0, 1, 0, 4,
2169 	0, 1, 0, 2, 0, 1, 0, 3,
2170 	0, 1, 0, 2, 0, 1, 0, 5,
2171 	0, 1, 0, 2, 0, 1, 0, 3,
2172 	0, 1, 0, 2, 0, 1, 0, 4,
2173 	0, 1, 0, 2, 0, 1, 0, 3,
2174 	0, 1, 0, 2, 0, 1, 0, 8
2175 };
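
/*
 * A note on the table above: sctp_map_lookup_tab[val] is the number of
 * consecutive 1-bits starting at bit 0 of val, i.e. the index of the lowest
 * clear bit (8 when val == 0xff). For example, val = 0x17 (binary 00010111)
 * has bits 0-2 set and bit 3 clear, so sctp_map_lookup_tab[0x17] == 3.
 * An equivalent computation, shown only as an illustrative sketch and not
 * part of the build:
 *
 *	static int8_t
 *	map_lookup(uint8_t val)
 *	{
 *		int8_t i;
 *
 *		for (i = 0; i < 8; i++)
 *			if ((val & (1 << i)) == 0)
 *				return (i);
 *		return (8);
 *	}
 */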
2176 
2177 
2178 void
2179 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2180 {
2181 	/*
2182 	 * Now we also need to check the mapping array in a couple of ways.
2183 	 * 1) Did we move the cum-ack point?
2184 	 *
2185 	 * When you first glance at this you might think that all entries that
2186 	 * make up the position of the cum-ack would be in the nr-mapping
2187 	 * array only... i.e. things up to the cum-ack are always
2188 	 * deliverable. That's true with one exception: when it's a fragmented
2189 	 * message we may not deliver the data until some threshold (or all
2190 	 * of it) is in place. So we must OR the nr_mapping_array and
2191 	 * mapping_array to get a true picture of the cum-ack.
2192 	 */
2193 	struct sctp_association *asoc;
2194 	int at;
2195 	uint8_t val;
2196 	int slide_from, slide_end, lgap, distance;
2197 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2198 
2199 	asoc = &stcb->asoc;
2200 	at = 0;
2201 
2202 	old_cumack = asoc->cumulative_tsn;
2203 	old_base = asoc->mapping_array_base_tsn;
2204 	old_highest = asoc->highest_tsn_inside_map;
2205 	/*
2206 	 * We could probably improve this a small bit by calculating the
2207 	 * offset of the current cum-ack as the starting point.
2208 	 */
2209 	at = 0;
2210 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2211 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2212 		if (val == 0xff) {
2213 			at += 8;
2214 		} else {
2215 			/* there is a 0 bit */
2216 			at += sctp_map_lookup_tab[val];
2217 			break;
2218 		}
2219 	}
2220 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
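	/*
	 * Worked example: with mapping_array_base_tsn = 1000, a first byte
	 * of 0xff and a second byte of 0x07 (bits 0-2 set), the loop above
	 * yields at = 8 + 3 = 11, so the cum-ack becomes 1000 + 10 = 1010;
	 * TSNs 1000 through 1010 have all been seen in one of the two maps.
	 */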
2221 
2222 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2223 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2224 #ifdef INVARIANTS
2225 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2226 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2227 #else
2228 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2229 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2230 		sctp_print_mapping_array(asoc);
2231 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2232 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2233 		}
2234 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2235 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2236 #endif
2237 	}
2238 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2239 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2240 	} else {
2241 		highest_tsn = asoc->highest_tsn_inside_map;
2242 	}
2243 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2244 		/* The complete array was completed by a single FR */
2245 		/* highest becomes the cum-ack */
2246 		int clr;
2247 
2248 #ifdef INVARIANTS
2249 		unsigned int i;
2250 
2251 #endif
2252 
2253 		/* clear the array */
2254 		clr = ((at + 7) >> 3);
2255 		if (clr > asoc->mapping_array_size) {
2256 			clr = asoc->mapping_array_size;
2257 		}
2258 		memset(asoc->mapping_array, 0, clr);
2259 		memset(asoc->nr_mapping_array, 0, clr);
2260 #ifdef INVARIANTS
2261 		for (i = 0; i < asoc->mapping_array_size; i++) {
2262 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2263 				printf("Error Mapping array's not clean at clear\n");
2264 				sctp_print_mapping_array(asoc);
2265 			}
2266 		}
2267 #endif
2268 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2269 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2270 	} else if (at >= 8) {
2271 		/* we can slide the mapping array down */
2272 		/* slide_from holds where we hit the first NON 0xff byte */
2273 
2274 		/*
2275 		 * now calculate the ceiling of the move using our highest
2276 		 * TSN value
2277 		 */
2278 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2279 		slide_end = (lgap >> 3);
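		/*
		 * Assuming SCTP_CALC_TSN_TO_GAP yields the bit offset of
		 * highest_tsn from mapping_array_base_tsn: if highest_tsn is
		 * 41 TSNs past the base, lgap = 41 and slide_end = 5, i.e.
		 * byte 5 is the last byte still holding live information.
		 */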
2280 		if (slide_end < slide_from) {
2281 			sctp_print_mapping_array(asoc);
2282 #ifdef INVARIANTS
2283 			panic("impossible slide");
2284 #else
2285 			printf("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2286 			    lgap, slide_end, slide_from, at);
2287 			return;
2288 #endif
2289 		}
2290 		if (slide_end > asoc->mapping_array_size) {
2291 #ifdef INVARIANTS
2292 			panic("would overrun buffer");
2293 #else
2294 			printf("Gak, would have overrun map end:%d slide_end:%d\n",
2295 			    asoc->mapping_array_size, slide_end);
2296 			slide_end = asoc->mapping_array_size;
2297 #endif
2298 		}
2299 		distance = (slide_end - slide_from) + 1;
2300 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2301 			sctp_log_map(old_base, old_cumack, old_highest,
2302 			    SCTP_MAP_PREPARE_SLIDE);
2303 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2304 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2305 		}
2306 		if (distance + slide_from > asoc->mapping_array_size ||
2307 		    distance < 0) {
2308 			/*
2309 			 * Here we do NOT slide forward the array so that
2310 			 * hopefully when more data comes in to fill it up
2311 			 * we will be able to slide it forward. Really I
2312 			 * don't think this should happen :-0
2313 			 */
2314 
2315 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2316 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2317 				    (uint32_t) asoc->mapping_array_size,
2318 				    SCTP_MAP_SLIDE_NONE);
2319 			}
2320 		} else {
2321 			int ii;
2322 
2323 			for (ii = 0; ii < distance; ii++) {
2324 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2325 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2326 
2327 			}
2328 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2329 				asoc->mapping_array[ii] = 0;
2330 				asoc->nr_mapping_array[ii] = 0;
2331 			}
2332 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2333 				asoc->highest_tsn_inside_map += (slide_from << 3);
2334 			}
2335 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2336 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2337 			}
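			/*
			 * The slide is always a whole number of bytes; e.g.
			 * slide_from = 2 copies everything down two bytes
			 * above and advances the base TSN by 16 below.
			 */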
2338 			asoc->mapping_array_base_tsn += (slide_from << 3);
2339 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2340 				sctp_log_map(asoc->mapping_array_base_tsn,
2341 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2342 				    SCTP_MAP_SLIDE_RESULT);
2343 			}
2344 		}
2345 	}
2346 }
2347 
2348 void
2349 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap, int *abort_flag)
2350 {
2351 	struct sctp_association *asoc;
2352 	uint32_t highest_tsn;
2353 
2354 	asoc = &stcb->asoc;
2355 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2356 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2357 	} else {
2358 		highest_tsn = asoc->highest_tsn_inside_map;
2359 	}
2360 
2361 	/*
2362 	 * Now we need to see if we need to queue a sack or just start the
2363 	 * timer (if allowed).
2364 	 */
2365 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2366 		/*
2367 		 * Ok, special case: in the SHUTDOWN-SENT state we make
2368 		 * sure the SACK timer is off and instead send a SHUTDOWN
2369 		 * and a SACK.
2370 		 */
2371 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2372 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2373 			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2374 		}
2375 		sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2376 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2377 	} else {
2378 		int is_a_gap;
2379 
2380 		/* is there a gap now ? */
2381 		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2382 
2383 		/*
2384 		 * CMT DAC algorithm: increase number of packets received
2385 		 * since last ack
2386 		 */
2387 		stcb->asoc.cmt_dac_pkts_rcvd++;
2388 
2389 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2390 							 * SACK */
2391 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2392 							 * longer is one */
2393 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2394 		    (is_a_gap) ||	/* is still a gap */
2395 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2396 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2397 		    ) {
2398 
2399 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2400 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2401 			    (stcb->asoc.send_sack == 0) &&
2402 			    (stcb->asoc.numduptsns == 0) &&
2403 			    (stcb->asoc.delayed_ack) &&
2404 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2405 
2406 				/*
2407 				 * CMT DAC algorithm: with CMT, delay acks
2408 				 * even in the face of reordering.
2409 				 *
2410 				 * Therefore, acks that do not have to be
2411 				 * sent because of the above reasons will be
2412 				 * delayed. That is, acks that would have
2413 				 * been sent due to gap reports will be
2414 				 * delayed with DAC. Start the delayed ack
2415 				 * timer.
2416 				 */
2417 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2418 				    stcb->sctp_ep, stcb, NULL);
2419 			} else {
2420 				/*
2421 				 * Ok we must build a SACK since the timer
2422 				 * is pending, we got our first packet OR
2423 				 * there are gaps or duplicates.
2424 				 */
2425 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2426 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2427 			}
2428 		} else {
2429 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2430 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2431 				    stcb->sctp_ep, stcb, NULL);
2432 			}
2433 		}
2434 	}
2435 }
2436 
2437 void
2438 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2439 {
2440 	struct sctp_tmit_chunk *chk;
2441 	uint32_t tsize, pd_point;
2442 	uint16_t nxt_todel;
2443 
2444 	if (asoc->fragmented_delivery_inprogress) {
2445 		sctp_service_reassembly(stcb, asoc);
2446 	}
2447 	/* Can we proceed further, i.e. is the PD-API complete? */
2448 	if (asoc->fragmented_delivery_inprogress) {
2449 		/* no */
2450 		return;
2451 	}
2452 	/*
2453 	 * Now, is there some other chunk I can deliver from the reassembly
2454 	 * queue?
2455 	 */
2456 doit_again:
2457 	chk = TAILQ_FIRST(&asoc->reasmqueue);
2458 	if (chk == NULL) {
2459 		asoc->size_on_reasm_queue = 0;
2460 		asoc->cnt_on_reasm_queue = 0;
2461 		return;
2462 	}
2463 	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2464 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2465 	    ((nxt_todel == chk->rec.data.stream_seq) ||
2466 	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2467 		/*
2468 		 * Yep, the first one is here. We set up to start reception
2469 		 * by backing down the TSN, just in case we can't deliver.
2470 		 */
2471 
2472 		/*
2473 		 * Before we start, though, either all of the message should
2474 		 * be here, or enough of it (up to the partial delivery
2475 		 * point) so that something can actually be delivered.
2476 		 */
2477 		if (stcb->sctp_socket) {
2478 			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
2479 			    stcb->sctp_ep->partial_delivery_point);
2480 		} else {
2481 			pd_point = stcb->sctp_ep->partial_delivery_point;
2482 		}
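		/*
		 * For example, with a 64KB receive buffer and a partial
		 * delivery point of 4KB, pd_point is 4KB: a PD-API run
		 * starts below once the whole message is queued or at least
		 * 4KB of it is.
		 */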
2483 		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2484 			asoc->fragmented_delivery_inprogress = 1;
2485 			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2486 			asoc->str_of_pdapi = chk->rec.data.stream_number;
2487 			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2488 			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2489 			asoc->fragment_flags = chk->rec.data.rcv_flags;
2490 			sctp_service_reassembly(stcb, asoc);
2491 			if (asoc->fragmented_delivery_inprogress == 0) {
2492 				goto doit_again;
2493 			}
2494 		}
2495 	}
2496 }
2497 
2498 int
2499 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2500     struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2501     struct sctp_nets *net, uint32_t * high_tsn)
2502 {
2503 	struct sctp_data_chunk *ch, chunk_buf;
2504 	struct sctp_association *asoc;
2505 	int num_chunks = 0;	/* number of data chunks processed */
2506 	int stop_proc = 0;
2507 	int chk_length, break_flag, last_chunk;
2508 	int abort_flag = 0, was_a_gap;
2509 	struct mbuf *m;
2510 	uint32_t highest_tsn;
2511 
2512 	/* set the rwnd */
2513 	sctp_set_rwnd(stcb, &stcb->asoc);
2514 
2515 	m = *mm;
2516 	SCTP_TCB_LOCK_ASSERT(stcb);
2517 	asoc = &stcb->asoc;
2518 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2519 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2520 	} else {
2521 		highest_tsn = asoc->highest_tsn_inside_map;
2522 	}
2523 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2524 	/*
2525 	 * setup where we got the last DATA packet from for any SACK that
2526 	 * may need to go out. Don't bump the net. This is done ONLY when a
2527 	 * chunk is assigned.
2528 	 */
2529 	asoc->last_data_chunk_from = net;
2530 
2531 	/*-
2532 	 * Now before we proceed we must figure out if this is a wasted
2533 	 * cluster... i.e. it is a small packet sent in and yet the driver
2534 	 * underneath allocated a full cluster for it. If so we must copy it
2535 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2536 	 * with cluster starvation. Note for __Panda__ we don't do this
2537 	 * since it has clusters all the way down to 64 bytes.
2538 	 */
2539 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2540 		/* we only handle mbufs that are singletons.. not chains */
2541 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2542 		if (m) {
2543 			/* ok, let's see if we can copy the data up */
2544 			caddr_t *from, *to;
2545 
2546 			/* get the pointers and copy */
2547 			to = mtod(m, caddr_t *);
2548 			from = mtod((*mm), caddr_t *);
2549 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2550 			/* copy the length and free up the old */
2551 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2552 			sctp_m_freem(*mm);
2553 			/* success, back copy */
2554 			*mm = m;
2555 		} else {
2556 			/* We are in trouble in the mbuf world .. yikes */
2557 			m = *mm;
2558 		}
2559 	}
2560 	/* get pointer to the first chunk header */
2561 	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2562 	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2563 	if (ch == NULL) {
2564 		return (1);
2565 	}
2566 	/*
2567 	 * process all DATA chunks...
2568 	 */
2569 	*high_tsn = asoc->cumulative_tsn;
2570 	break_flag = 0;
2571 	asoc->data_pkts_seen++;
2572 	while (stop_proc == 0) {
2573 		/* validate chunk length */
2574 		chk_length = ntohs(ch->ch.chunk_length);
2575 		if (length - *offset < chk_length) {
2576 			/* all done, mutilated chunk */
2577 			stop_proc = 1;
2578 			break;
2579 		}
2580 		if (ch->ch.chunk_type == SCTP_DATA) {
2581 			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2582 				/*
2583 				 * Need to send an abort since we had an
2584 				 * invalid (too short) data chunk.
2585 				 */
2586 				struct mbuf *op_err;
2587 
2588 				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2589 				    0, M_DONTWAIT, 1, MT_DATA);
2590 
2591 				if (op_err) {
2592 					struct sctp_paramhdr *ph;
2593 					uint32_t *ippp;
2594 
2595 					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2596 					    (2 * sizeof(uint32_t));
2597 					ph = mtod(op_err, struct sctp_paramhdr *);
2598 					ph->param_type =
2599 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2600 					ph->param_length = htons(SCTP_BUF_LEN(op_err));
2601 					ippp = (uint32_t *) (ph + 1);
2602 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2603 					ippp++;
2604 					*ippp = asoc->cumulative_tsn;
2605 
2606 				}
2607 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2608 				sctp_abort_association(inp, stcb, m, iphlen, sh,
2609 				    op_err, 0, net->port);
2610 				return (2);
2611 			}
2612 #ifdef SCTP_AUDITING_ENABLED
2613 			sctp_audit_log(0xB1, 0);
2614 #endif
2615 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2616 				last_chunk = 1;
2617 			} else {
2618 				last_chunk = 0;
2619 			}
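			/*
			 * SCTP_SIZE32() rounds the chunk length up to a
			 * 4-byte boundary; a chunk whose padded length
			 * exactly consumes the remaining packet bytes is the
			 * last one, so its mbuf chain can be stolen rather
			 * than copied.
			 */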
2620 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2621 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2622 			    last_chunk)) {
2623 				num_chunks++;
2624 			}
2625 			if (abort_flag)
2626 				return (2);
2627 
2628 			if (break_flag) {
2629 				/*
2630 				 * Set because we ran out of rwnd space and
2631 				 * have no drop report space left.
2632 				 */
2633 				stop_proc = 1;
2634 				break;
2635 			}
2636 		} else {
2637 			/* not a data chunk in the data region */
2638 			switch (ch->ch.chunk_type) {
2639 			case SCTP_INITIATION:
2640 			case SCTP_INITIATION_ACK:
2641 			case SCTP_SELECTIVE_ACK:
2642 			case SCTP_NR_SELECTIVE_ACK:	/* EY */
2643 			case SCTP_HEARTBEAT_REQUEST:
2644 			case SCTP_HEARTBEAT_ACK:
2645 			case SCTP_ABORT_ASSOCIATION:
2646 			case SCTP_SHUTDOWN:
2647 			case SCTP_SHUTDOWN_ACK:
2648 			case SCTP_OPERATION_ERROR:
2649 			case SCTP_COOKIE_ECHO:
2650 			case SCTP_COOKIE_ACK:
2651 			case SCTP_ECN_ECHO:
2652 			case SCTP_ECN_CWR:
2653 			case SCTP_SHUTDOWN_COMPLETE:
2654 			case SCTP_AUTHENTICATION:
2655 			case SCTP_ASCONF_ACK:
2656 			case SCTP_PACKET_DROPPED:
2657 			case SCTP_STREAM_RESET:
2658 			case SCTP_FORWARD_CUM_TSN:
2659 			case SCTP_ASCONF:
2660 				/*
2661 				 * Now, what do we do with KNOWN chunks that
2662 				 * are NOT in the right place?
2663 				 *
2664 				 * For now, I do nothing but ignore them. We
2665 				 * may later want to add sysctl stuff to
2666 				 * switch out and do either an ABORT() or
2667 				 * possibly process them.
2668 				 */
2669 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2670 					struct mbuf *op_err;
2671 
2672 					op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2673 					sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
2674 					return (2);
2675 				}
2676 				break;
2677 			default:
2678 				/* unknown chunk type, use bit rules */
2679 				if (ch->ch.chunk_type & 0x40) {
2680 					/* Add an error report to the queue */
2681 					struct mbuf *merr;
2682 					struct sctp_paramhdr *phd;
2683 
2684 					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2685 					if (merr) {
2686 						phd = mtod(merr, struct sctp_paramhdr *);
2687 						/*
2688 						 * We cheat and use param
2689 						 * type since we did not
2690 						 * bother to define an error
2691 						 * cause struct. They are
2692 						 * the same basic format
2693 						 * with different names.
2694 						 */
2695 						phd->param_type =
2696 						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
2697 						phd->param_length =
2698 						    htons(chk_length + sizeof(*phd));
2699 						SCTP_BUF_LEN(merr) = sizeof(*phd);
2700 						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
2701 						    SCTP_SIZE32(chk_length),
2702 						    M_DONTWAIT);
2703 						if (SCTP_BUF_NEXT(merr)) {
2704 							sctp_queue_op_err(stcb, merr);
2705 						} else {
2706 							sctp_m_freem(merr);
2707 						}
2708 					}
2709 				}
2710 				if ((ch->ch.chunk_type & 0x80) == 0) {
2711 					/* discard the rest of this packet */
2712 					stop_proc = 1;
2713 				}	/* else skip this bad chunk and
2714 					 * continue... */
2715 				break;
2716 			}	/* switch of chunk type */
2717 		}
2718 		*offset += SCTP_SIZE32(chk_length);
2719 		if ((*offset >= length) || stop_proc) {
2720 			/* no more data left in the mbuf chain */
2721 			stop_proc = 1;
2722 			continue;
2723 		}
2724 		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2725 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2726 		if (ch == NULL) {
2727 			*offset = length;
2728 			stop_proc = 1;
2729 			break;
2730 
2731 		}
2732 	}			/* while */
2733 	if (break_flag) {
2734 		/*
2735 		 * we need to report rwnd overrun drops.
2736 		 */
2737 		sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2738 	}
2739 	if (num_chunks) {
2740 		/*
2741 		 * Did we get data? If so, update the time for auto-close and
2742 		 * give the peer credit for being alive.
2743 		 */
2744 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2745 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2746 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2747 			    stcb->asoc.overall_error_count,
2748 			    0,
2749 			    SCTP_FROM_SCTP_INDATA,
2750 			    __LINE__);
2751 		}
2752 		stcb->asoc.overall_error_count = 0;
2753 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2754 	}
2755 	/* now service all of the reassm queue if needed */
2756 	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2757 		sctp_service_queues(stcb, asoc);
2758 
2759 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2760 		/* Assure that we ack right away */
2761 		stcb->asoc.send_sack = 1;
2762 	}
2763 	/* Start a sack timer or QUEUE a SACK for sending */
2764 	sctp_sack_check(stcb, was_a_gap, &abort_flag);
2765 	if (abort_flag)
2766 		return (2);
2767 
2768 	return (0);
2769 }
2770 
2771 static int
2772 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2773     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2774     int *num_frs,
2775     uint32_t * biggest_newly_acked_tsn,
2776     uint32_t * this_sack_lowest_newack,
2777     int *ecn_seg_sums, int *rto_ok)
2778 {
2779 	struct sctp_tmit_chunk *tp1;
2780 	unsigned int theTSN;
2781 	int j, wake_him = 0, circled = 0;
2782 
2783 	/* Recover the tp1 we last saw */
2784 	tp1 = *p_tp1;
2785 	if (tp1 == NULL) {
2786 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2787 	}
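	/*
	 * Gap ack block boundaries are offsets from the cumulative TSN
	 * (last_tsn here): e.g. with last_tsn = 1000, a block with start 2
	 * and end 4 reports TSNs 1002 through 1004 as received.
	 */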
2788 	for (j = frag_strt; j <= frag_end; j++) {
2789 		theTSN = j + last_tsn;
2790 		while (tp1) {
2791 			if (tp1->rec.data.doing_fast_retransmit)
2792 				(*num_frs) += 1;
2793 
2794 			/*-
2795 			 * CMT: CUCv2 algorithm. For each TSN being
2796 			 * processed from the sent queue, track the
2797 			 * next expected pseudo-cumack, or
2798 			 * rtx_pseudo_cumack, if required. Separate
2799 			 * cumack trackers for first transmissions,
2800 			 * and retransmissions.
2801 			 */
2802 			if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2803 			    (tp1->snd_count == 1)) {
2804 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2805 				tp1->whoTo->find_pseudo_cumack = 0;
2806 			}
2807 			if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2808 			    (tp1->snd_count > 1)) {
2809 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2810 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2811 			}
2812 			if (tp1->rec.data.TSN_seq == theTSN) {
2813 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2814 					/*-
2815 					 * must be held until
2816 					 * cum-ack passes
2817 					 */
2818 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2819 						/*-
2820 						 * If it is less than RESEND, it is
2821 						 * now no-longer in flight.
2822 						 * Higher values may already be set
2823 						 * via previous Gap Ack Blocks...
2824 						 * i.e. ACKED or RESEND.
2825 						 */
2826 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2827 						    *biggest_newly_acked_tsn)) {
2828 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2829 						}
2830 						/*-
2831 						 * CMT: SFR algo (and HTNA) - set
2832 						 * saw_newack to 1 for dest being
2833 						 * newly acked. update
2834 						 * this_sack_highest_newack if
2835 						 * appropriate.
2836 						 */
2837 						if (tp1->rec.data.chunk_was_revoked == 0)
2838 							tp1->whoTo->saw_newack = 1;
2839 
2840 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2841 						    tp1->whoTo->this_sack_highest_newack)) {
2842 							tp1->whoTo->this_sack_highest_newack =
2843 							    tp1->rec.data.TSN_seq;
2844 						}
2845 						/*-
2846 						 * CMT DAC algo: also update
2847 						 * this_sack_lowest_newack
2848 						 */
2849 						if (*this_sack_lowest_newack == 0) {
2850 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2851 								sctp_log_sack(*this_sack_lowest_newack,
2852 								    last_tsn,
2853 								    tp1->rec.data.TSN_seq,
2854 								    0,
2855 								    0,
2856 								    SCTP_LOG_TSN_ACKED);
2857 							}
2858 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2859 						}
2860 						/*-
2861 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2862 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2863 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2864 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2865 						 * Separate pseudo_cumack trackers for first transmissions and
2866 						 * retransmissions.
2867 						 */
2868 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2869 							if (tp1->rec.data.chunk_was_revoked == 0) {
2870 								tp1->whoTo->new_pseudo_cumack = 1;
2871 							}
2872 							tp1->whoTo->find_pseudo_cumack = 1;
2873 						}
2874 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2875 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2876 						}
2877 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2878 							if (tp1->rec.data.chunk_was_revoked == 0) {
2879 								tp1->whoTo->new_pseudo_cumack = 1;
2880 							}
2881 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2882 						}
2883 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2884 							sctp_log_sack(*biggest_newly_acked_tsn,
2885 							    last_tsn,
2886 							    tp1->rec.data.TSN_seq,
2887 							    frag_strt,
2888 							    frag_end,
2889 							    SCTP_LOG_TSN_ACKED);
2890 						}
2891 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2892 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2893 							    tp1->whoTo->flight_size,
2894 							    tp1->book_size,
2895 							    (uintptr_t) tp1->whoTo,
2896 							    tp1->rec.data.TSN_seq);
2897 						}
2898 						sctp_flight_size_decrease(tp1);
2899 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2900 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2901 							    tp1);
2902 						}
2903 						sctp_total_flight_decrease(stcb, tp1);
2904 
2905 						tp1->whoTo->net_ack += tp1->send_size;
2906 						if (tp1->snd_count < 2) {
2907 							/*-
2908 							 * True non-retransmitted chunk
2909 							 */
2910 							tp1->whoTo->net_ack2 += tp1->send_size;
2911 
2912 							/*-
2913 							 * update RTO too ?
2914 							 */
2915 							if (tp1->do_rtt) {
2916 								if (*rto_ok) {
2917 									tp1->whoTo->RTO =
2918 									    sctp_calculate_rto(stcb,
2919 									    &stcb->asoc,
2920 									    tp1->whoTo,
2921 									    &tp1->sent_rcv_time,
2922 									    sctp_align_safe_nocopy,
2923 									    SCTP_RTT_FROM_DATA);
2924 									*rto_ok = 0;
2925 								}
2926 								if (tp1->whoTo->rto_needed == 0) {
2927 									tp1->whoTo->rto_needed = 1;
2928 								}
2929 								tp1->do_rtt = 0;
2930 							}
2931 						}
2932 					}
2933 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2934 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2935 						    stcb->asoc.this_sack_highest_gap)) {
2936 							stcb->asoc.this_sack_highest_gap =
2937 							    tp1->rec.data.TSN_seq;
2938 						}
2939 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2940 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2941 #ifdef SCTP_AUDITING_ENABLED
2942 							sctp_audit_log(0xB2,
2943 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2944 #endif
2945 						}
2946 					}
2947 					/*-
2948 					 * All chunks NOT UNSENT fall through here and are marked
2949 					 * (leave PR-SCTP ones that are to skip alone though)
2950 					 */
2951 					if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
2952 						tp1->sent = SCTP_DATAGRAM_MARKED;
2953 
2954 					if (tp1->rec.data.chunk_was_revoked) {
2955 						/* deflate the cwnd */
2956 						tp1->whoTo->cwnd -= tp1->book_size;
2957 						tp1->rec.data.chunk_was_revoked = 0;
2958 					}
2959 					/* NR Sack code here */
2960 					if (nr_sacking) {
2961 						if (tp1->data) {
2962 							/*
2963 							 * sa_ignore
2964 							 * NO_NULL_CHK
2965 							 */
2966 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2967 							sctp_m_freem(tp1->data);
2968 							tp1->data = NULL;
2969 						}
2970 						wake_him++;
2971 					}
2972 				}
2973 				break;
2974 			}	/* if (tp1->TSN_seq == theTSN) */
2975 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
2976 				break;
2977 			}
2978 			tp1 = TAILQ_NEXT(tp1, sctp_next);
2979 			if ((tp1 == NULL) && (circled == 0)) {
2980 				circled++;
2981 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2982 			}
2983 		}		/* end while (tp1) */
2984 		if (tp1 == NULL) {
2985 			circled = 0;
2986 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2987 		}
2988 		/* In case the fragments were not in order we must reset */
2989 	}			/* end for (j = fragStart */
2990 	*p_tp1 = tp1;
2991 	return (wake_him);	/* Return value only used for nr-sack */
2992 }
2993 
2994 
2995 static int
2996 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
2997     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
2998     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
2999     int num_seg, int num_nr_seg, int *ecn_seg_sums,
3000     int *rto_ok)
3001 {
3002 	struct sctp_gap_ack_block *frag, block;
3003 	struct sctp_tmit_chunk *tp1;
3004 	int i;
3005 	int num_frs = 0;
3006 	int chunk_freed;
3007 	int non_revocable;
3008 	uint16_t frag_strt, frag_end, prev_frag_end;
3009 
3010 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3011 	prev_frag_end = 0;
3012 	chunk_freed = 0;
3013 
3014 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3015 		if (i == num_seg) {
3016 			prev_frag_end = 0;
3017 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3018 		}
3019 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3020 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3021 		*offset += sizeof(block);
3022 		if (frag == NULL) {
3023 			return (chunk_freed);
3024 		}
3025 		frag_strt = ntohs(frag->start);
3026 		frag_end = ntohs(frag->end);
3027 
3028 		if (frag_strt > frag_end) {
3029 			/* This gap report is malformed, skip it. */
3030 			continue;
3031 		}
3032 		if (frag_strt <= prev_frag_end) {
3033 			/* This gap report is not in order, so restart. */
3034 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3035 		}
3036 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3037 			*biggest_tsn_acked = last_tsn + frag_end;
3038 		}
3039 		if (i < num_seg) {
3040 			non_revocable = 0;
3041 		} else {
3042 			non_revocable = 1;
3043 		}
3044 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3045 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3046 		    this_sack_lowest_newack, ecn_seg_sums, rto_ok)) {
3047 			chunk_freed = 1;
3048 		}
3049 		prev_frag_end = frag_end;
3050 	}
3051 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3052 		if (num_frs)
3053 			sctp_log_fr(*biggest_tsn_acked,
3054 			    *biggest_newly_acked_tsn,
3055 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3056 	}
3057 	return (chunk_freed);
3058 }
3059 
3060 static void
3061 sctp_check_for_revoked(struct sctp_tcb *stcb,
3062     struct sctp_association *asoc, uint32_t cumack,
3063     uint32_t biggest_tsn_acked)
3064 {
3065 	struct sctp_tmit_chunk *tp1;
3066 	int tot_revoked = 0;
3067 
3068 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3069 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3070 			/*
3071 			 * ok this guy is either ACK or MARKED. If it is
3072 			 * ACKED it has been previously acked but not this
3073 			 * time i.e. revoked.  If it is MARKED it was ACK'ed
3074 			 * again.
3075 			 */
3076 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3077 				break;
3078 			}
3079 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3080 				/* it has been revoked */
3081 				tp1->sent = SCTP_DATAGRAM_SENT;
3082 				tp1->rec.data.chunk_was_revoked = 1;
3083 				/*
3084 				 * We must add this stuff back in to assure
3085 				 * timers and such get started.
3086 				 */
3087 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3088 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3089 					    tp1->whoTo->flight_size,
3090 					    tp1->book_size,
3091 					    (uintptr_t) tp1->whoTo,
3092 					    tp1->rec.data.TSN_seq);
3093 				}
3094 				sctp_flight_size_increase(tp1);
3095 				sctp_total_flight_increase(stcb, tp1);
3096 				/*
3097 				 * We inflate the cwnd to compensate for our
3098 				 * artificial inflation of the flight_size.
3099 				 */
3100 				tp1->whoTo->cwnd += tp1->book_size;
3101 				tot_revoked++;
3102 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3103 					sctp_log_sack(asoc->last_acked_seq,
3104 					    cumack,
3105 					    tp1->rec.data.TSN_seq,
3106 					    0,
3107 					    0,
3108 					    SCTP_LOG_TSN_REVOKED);
3109 				}
3110 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3111 				/* it has been re-acked in this SACK */
3112 				tp1->sent = SCTP_DATAGRAM_ACKED;
3113 			}
3114 		}
3115 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3116 			break;
3117 	}
3118 }
3119 
3120 
3121 static void
3122 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3123     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3124 {
3125 	struct sctp_tmit_chunk *tp1;
3126 	int strike_flag = 0;
3127 	struct timeval now;
3128 	int tot_retrans = 0;
3129 	uint32_t sending_seq;
3130 	struct sctp_nets *net;
3131 	int num_dests_sacked = 0;
3132 
3133 	/*
3134 	 * select the sending_seq, this is either the next thing ready to be
3135 	 * sent but not transmitted, OR, the next seq we assign.
3136 	 */
3137 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3138 	if (tp1 == NULL) {
3139 		sending_seq = asoc->sending_seq;
3140 	} else {
3141 		sending_seq = tp1->rec.data.TSN_seq;
3142 	}
3143 
3144 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3145 	if ((asoc->sctp_cmt_on_off > 0) &&
3146 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3147 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3148 			if (net->saw_newack)
3149 				num_dests_sacked++;
3150 		}
3151 	}
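	/*
	 * A SACK is a "mixed" SACK for the DAC algorithm when it newly
	 * acks data on more than one destination; the extra strike taken
	 * in the DAC branches below is therefore only applied when
	 * num_dests_sacked == 1.
	 */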
3152 	if (stcb->asoc.peer_supports_prsctp) {
3153 		(void)SCTP_GETTIME_TIMEVAL(&now);
3154 	}
3155 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3156 		strike_flag = 0;
3157 		if (tp1->no_fr_allowed) {
3158 			/* this one had a timeout or something */
3159 			continue;
3160 		}
3161 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3162 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3163 				sctp_log_fr(biggest_tsn_newly_acked,
3164 				    tp1->rec.data.TSN_seq,
3165 				    tp1->sent,
3166 				    SCTP_FR_LOG_CHECK_STRIKE);
3167 		}
3168 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3169 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3170 			/* done */
3171 			break;
3172 		}
3173 		if (stcb->asoc.peer_supports_prsctp) {
3174 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3175 				/* Is it expired? */
3176 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3177 					/* Yes so drop it */
3178 					if (tp1->data != NULL) {
3179 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3180 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3181 						    SCTP_SO_NOT_LOCKED);
3182 					}
3183 					continue;
3184 				}
3185 			}
3186 		}
3187 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3188 			/* we are beyond the tsn in the sack  */
3189 			break;
3190 		}
3191 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3192 			/* either a RESEND, ACKED, or MARKED */
3193 			/* skip */
3194 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3195 				/* Continue striking FWD-TSN chunks */
3196 				tp1->rec.data.fwd_tsn_cnt++;
3197 			}
3198 			continue;
3199 		}
3200 		/*
3201 		 * CMT: SFR algo (covers part of DAC and HTNA as well)
3202 		 */
3203 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3204 			/*
3205 			 * No new acks were received for data sent to this
3206 			 * dest. Therefore, according to the SFR algo for
3207 			 * CMT, no data sent to this dest can be marked for
3208 			 * FR using this SACK.
3209 			 */
3210 			continue;
3211 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3212 		    tp1->whoTo->this_sack_highest_newack)) {
3213 			/*
3214 			 * CMT: New acks were received for data sent to
3215 			 * this dest. But no new acks were seen for data
3216 			 * sent after tp1. Therefore, according to the SFR
3217 			 * algo for CMT, tp1 cannot be marked for FR using
3218 			 * this SACK. This step covers part of the DAC algo
3219 			 * and the HTNA algo as well.
3220 			 */
3221 			continue;
3222 		}
3223 		/*
3224 		 * Here we check to see if we have already done a FR
3225 		 * and if so we see if the biggest TSN we saw in the sack is
3226 		 * smaller than the recovery point. If so we don't strike
3227 		 * the tsn... otherwise we CAN strike the TSN.
3228 		 */
3229 		/*
3230 		 * @@@ JRI: Check for CMT if (accum_moved &&
3231 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3232 		 * 0)) {
3233 		 */
3234 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3235 			/*
3236 			 * Strike the TSN if in fast-recovery and cum-ack
3237 			 * moved.
3238 			 */
3239 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3240 				sctp_log_fr(biggest_tsn_newly_acked,
3241 				    tp1->rec.data.TSN_seq,
3242 				    tp1->sent,
3243 				    SCTP_FR_LOG_STRIKE_CHUNK);
3244 			}
3245 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3246 				tp1->sent++;
3247 			}
3248 			if ((asoc->sctp_cmt_on_off > 0) &&
3249 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3250 				/*
3251 				 * CMT DAC algorithm: If the SACK DAC flag is
3252 				 * set to 0, then the lowest_newack test will
3253 				 * not pass because it would have been set to
3254 				 * the cumack earlier. If the chunk is not
3255 				 * already marked for rtx, the SACK is not a
3256 				 * mixed SACK, and tp1 is not between two
3257 				 * sacked TSNs, then mark it one more time.
3258 				 * NOTE that we mark one additional time since
3259 				 * the SACK DAC flag indicates that two packets
3260 				 * have been received after this missing TSN.
3261 				 */
3262 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3263 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3264 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3265 						sctp_log_fr(16 + num_dests_sacked,
3266 						    tp1->rec.data.TSN_seq,
3267 						    tp1->sent,
3268 						    SCTP_FR_LOG_STRIKE_CHUNK);
3269 					}
3270 					tp1->sent++;
3271 				}
3272 			}
3273 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3274 		    (asoc->sctp_cmt_on_off == 0)) {
3275 			/*
3276 			 * For those that have done a FR we must take
3277 			 * special consideration if we strike, i.e. the
3278 			 * biggest_newly_acked must be higher than the
3279 			 * sending_seq at the time we did the FR.
3280 			 */
3281 			if (
3282 #ifdef SCTP_FR_TO_ALTERNATE
3283 			/*
3284 			 * If FR's go to new networks, then we must only do
3285 			 * this for singly homed asoc's. However if the FR's
3286 			 * go to the same network (Armando's work) then it's
3287 			 * ok to FR multiple times.
3288 			 */
3289 			    (asoc->numnets < 2)
3290 #else
3291 			    (1)
3292 #endif
3293 			    ) {
3294 
3295 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3296 				    tp1->rec.data.fast_retran_tsn)) {
3297 					/*
3298 					 * Strike the TSN, since this ack is
3299 					 * beyond where things were when we
3300 					 * did a FR.
3301 					 */
3302 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3303 						sctp_log_fr(biggest_tsn_newly_acked,
3304 						    tp1->rec.data.TSN_seq,
3305 						    tp1->sent,
3306 						    SCTP_FR_LOG_STRIKE_CHUNK);
3307 					}
3308 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3309 						tp1->sent++;
3310 					}
3311 					strike_flag = 1;
3312 					if ((asoc->sctp_cmt_on_off > 0) &&
3313 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3314 						/*
3315 						 * CMT DAC algorithm: If
3316 						 * the SACK DAC flag is 0,
3317 						 * the lowest_newack test
3318 						 * will not pass because
3319 						 * it would have been set
3320 						 * to the cumack earlier.
3321 						 * If the chunk is not yet
3322 						 * marked for rtx, the
3323 						 * SACK is not mixed, and
3324 						 * tp1 is not between two
3325 						 * sacked TSNs, mark it
3326 						 * one more time. NOTE we
3327 						 * mark one extra time as
3328 						 * the SACK DAC flag says
3329 						 * two packets have been
3330 						 * received after this
3331 						 * missing TSN.
3332 						 */
3333 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3334 						    (num_dests_sacked == 1) &&
3335 						    SCTP_TSN_GT(this_sack_lowest_newack,
3336 						    tp1->rec.data.TSN_seq)) {
3337 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3338 								sctp_log_fr(32 + num_dests_sacked,
3339 								    tp1->rec.data.TSN_seq,
3340 								    tp1->sent,
3341 								    SCTP_FR_LOG_STRIKE_CHUNK);
3342 							}
3343 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3344 								tp1->sent++;
3345 							}
3346 						}
3347 					}
3348 				}
3349 			}
3350 			/*
3351 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3352 			 * algo covers HTNA.
3353 			 */
3354 		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3355 		    biggest_tsn_newly_acked)) {
3356 			/*
3357 			 * We don't strike these: this is the HTNA
3358 			 * algorithm, i.e. we don't strike if our TSN is
3359 			 * larger than the Highest TSN Newly Acked.
3360 			 */
3361 			;
3362 		} else {
3363 			/* Strike the TSN */
3364 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3365 				sctp_log_fr(biggest_tsn_newly_acked,
3366 				    tp1->rec.data.TSN_seq,
3367 				    tp1->sent,
3368 				    SCTP_FR_LOG_STRIKE_CHUNK);
3369 			}
3370 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3371 				tp1->sent++;
3372 			}
3373 			if ((asoc->sctp_cmt_on_off > 0) &&
3374 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3375 				/*
3376 				 * CMT DAC algorithm: If the SACK DAC flag is
3377 				 * set to 0, then the lowest_newack test will
3378 				 * not pass because it would have been set to
3379 				 * the cumack earlier. If the chunk is not
3380 				 * already marked for rtx, the SACK is not a
3381 				 * mixed SACK, and tp1 is not between two
3382 				 * sacked TSNs, then mark it one more time.
3383 				 * NOTE that we mark one additional time since
3384 				 * the SACK DAC flag indicates that two packets
3385 				 * have been received after this missing TSN.
3386 				 */
3387 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3388 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3389 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3390 						sctp_log_fr(48 + num_dests_sacked,
3391 						    tp1->rec.data.TSN_seq,
3392 						    tp1->sent,
3393 						    SCTP_FR_LOG_STRIKE_CHUNK);
3394 					}
3395 					tp1->sent++;
3396 				}
3397 			}
3398 		}
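		/*
		 * At this point tp1 has been struck zero, one, or two times
		 * (depending on fast-recovery state, HTNA, and the CMT DAC
		 * rule above); if its strike count has now reached
		 * SCTP_DATAGRAM_RESEND it is set up for fast retransmission
		 * below.
		 */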
3399 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3400 			struct sctp_nets *alt;
3401 
3402 			/* fix counts and things */
3403 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3404 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3405 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3406 				    tp1->book_size,
3407 				    (uintptr_t) tp1->whoTo,
3408 				    tp1->rec.data.TSN_seq);
3409 			}
3410 			if (tp1->whoTo) {
3411 				tp1->whoTo->net_ack++;
3412 				sctp_flight_size_decrease(tp1);
3413 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3414 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3415 					    tp1);
3416 				}
3417 			}
3418 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3419 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3420 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3421 			}
3422 			/* add back to the rwnd */
3423 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3424 
3425 			/* remove from the total flight */
3426 			sctp_total_flight_decrease(stcb, tp1);
3427 
3428 			if ((stcb->asoc.peer_supports_prsctp) &&
3429 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3430 				/*
3431 				 * Has it been retransmitted tv_sec times? -
3432 				 * we store the retran count there.
3433 				 */
3434 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3435 					/* Yes, so drop it */
3436 					if (tp1->data != NULL) {
3437 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3438 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3439 						    SCTP_SO_NOT_LOCKED);
3440 					}
3441 					/* Make sure to flag we had a FR */
3442 					tp1->whoTo->net_ack++;
3443 					continue;
3444 				}
3445 			}
3446 			/* printf("OK, we are now ready to FR this guy\n"); */
3447 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3448 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3449 				    0, SCTP_FR_MARKED);
3450 			}
3451 			if (strike_flag) {
3452 				/* This is a subsequent FR */
3453 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3454 			}
3455 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3456 			if (asoc->sctp_cmt_on_off > 0) {
3457 				/*
3458 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3459 				 * If CMT is being used, then pick dest with
3460 				 * largest ssthresh for any retransmission.
3461 				 */
3462 				tp1->no_fr_allowed = 1;
3463 				alt = tp1->whoTo;
3464 				/* sa_ignore NO_NULL_CHK */
3465 				if (asoc->sctp_cmt_pf > 0) {
3466 					/*
3467 					 * JRS 5/18/07 - If CMT PF is on,
3468 					 * use the PF version of
3469 					 * find_alt_net()
3470 					 */
3471 					alt = sctp_find_alternate_net(stcb, alt, 2);
3472 				} else {
3473 					/*
3474 					 * JRS 5/18/07 - If only CMT is on,
3475 					 * use the CMT version of
3476 					 * find_alt_net()
3477 					 */
3478 					/* sa_ignore NO_NULL_CHK */
3479 					alt = sctp_find_alternate_net(stcb, alt, 1);
3480 				}
3481 				if (alt == NULL) {
3482 					alt = tp1->whoTo;
3483 				}
3484 				/*
3485 				 * CUCv2: If a different dest is picked for
3486 				 * the retransmission, then new
3487 				 * (rtx-)pseudo_cumack needs to be tracked
3488 				 * for orig dest. Let CUCv2 track new (rtx-)
3489 				 * pseudo-cumack always.
3490 				 */
3491 				if (tp1->whoTo) {
3492 					tp1->whoTo->find_pseudo_cumack = 1;
3493 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3494 				}
3495 			} else {/* CMT is OFF */
3496 
3497 #ifdef SCTP_FR_TO_ALTERNATE
3498 				/* Can we find an alternate? */
3499 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3500 #else
3501 				/*
3502 				 * default behavior is to NOT retransmit
3503 				 * FR's to an alternate. Armando Caro's
3504 				 * paper details why.
3505 				 */
3506 				alt = tp1->whoTo;
3507 #endif
3508 			}
3509 
3510 			tp1->rec.data.doing_fast_retransmit = 1;
3511 			tot_retrans++;
3512 			/* mark the sending seq for possible subsequent FR's */
3513 			/*
3514 			 * printf("Marking TSN for FR new value %x\n",
3515 			 * (uint32_t)tpi->rec.data.TSN_seq);
3516 			 */
3517 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3518 				/*
3519 				 * If the send queue is empty then sending_seq
3520 				 * is the next sequence number that will be
3521 				 * assigned, so we subtract one from this to
3522 				 * get the one we last sent.
3523 				 */
3524 				tp1->rec.data.fast_retran_tsn = sending_seq;
3525 			} else {
3526 				/*
3527 				 * If there are chunks on the send queue
3528 				 * (unsent data that has made it from the
3529 				 * stream queues but not out the door), we
3530 				 * take the first one (which will have the
3531 				 * lowest TSN) and subtract one to get the
3532 				 * one we last sent.
3533 				 */
3534 				struct sctp_tmit_chunk *ttt;
3535 
3536 				ttt = TAILQ_FIRST(&asoc->send_queue);
3537 				tp1->rec.data.fast_retran_tsn =
3538 				    ttt->rec.data.TSN_seq;
3539 			}
3540 
3541 			if (tp1->do_rtt) {
3542 				/*
3543 				 * this guy had an RTO calculation pending on
3544 				 * it; cancel it
3545 				 */
3546 				if (tp1->whoTo->rto_needed == 0) {
3547 					tp1->whoTo->rto_needed = 1;
3548 				}
3549 				tp1->do_rtt = 0;
3550 			}
3551 			if (alt != tp1->whoTo) {
3552 				/* yes, there is an alternate. */
3553 				sctp_free_remote_addr(tp1->whoTo);
3554 				/* sa_ignore FREED_MEMORY */
3555 				tp1->whoTo = alt;
3556 				atomic_add_int(&alt->ref_count, 1);
3557 			}
3558 		}
3559 	}
3560 }
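/*
 * Sketch of the strike counting performed by
 * sctp_strike_gap_ack_chunks() above (hypothetical values; the real
 * constants are in sctp_constants.h): tp1->sent doubles as the strike
 * counter while it is below SCTP_DATAGRAM_RESEND, so each miss
 * increments it and reaching SCTP_DATAGRAM_RESEND marks the chunk for
 * fast retransmission, the usual "three strikes" rule.
 */
#if 0
enum example_sent_state { EX_SENT = 10, EX_STRIKE1, EX_STRIKE2, EX_RESEND };

static int
example_strike(int *sent)
{
	if (*sent < EX_RESEND)
		(*sent)++;
	return (*sent == EX_RESEND);	/* ready for fast retransmit? */
}
#endif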
3561 
3562 struct sctp_tmit_chunk *
3563 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3564     struct sctp_association *asoc)
3565 {
3566 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3567 	struct timeval now;
3568 	int now_filled = 0;
3569 
3570 	if (asoc->peer_supports_prsctp == 0) {
3571 		return (NULL);
3572 	}
3573 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3574 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3575 		    tp1->sent != SCTP_DATAGRAM_RESEND) {
3576 			/* no chance to advance, out of here */
3577 			break;
3578 		}
3579 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3580 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3581 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3582 				    asoc->advanced_peer_ack_point,
3583 				    tp1->rec.data.TSN_seq, 0, 0);
3584 			}
3585 		}
3586 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3587 			/*
3588 			 * We can't fwd-tsn past any that are reliable aka
3589 			 * retransmitted until the asoc fails.
3590 			 */
3591 			break;
3592 		}
3593 		if (!now_filled) {
3594 			(void)SCTP_GETTIME_TIMEVAL(&now);
3595 			now_filled = 1;
3596 		}
3597 		/*
3598 		 * Now we have a chunk which is marked for another
3599 		 * retransmission to a PR-stream but may have run out of its
3600 		 * chances already, OR has been marked to skip now. Can we
3601 		 * skip it if it's a resend?
3602 		 */
3603 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3604 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3605 			/*
3606 			 * Now is this one marked for resend and its time is
3607 			 * now up?
3608 			 */
3609 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3610 				/* Yes so drop it */
3611 				if (tp1->data) {
3612 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3613 					    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3614 					    SCTP_SO_NOT_LOCKED);
3615 				}
3616 			} else {
3617 				/*
3618 				 * No, we are done when we hit one for resend
3619 				 * whose time has not expired.
3620 				 */
3621 				break;
3622 			}
3623 		}
3624 		/*
3625 		 * OK, now if this chunk is marked to drop we can clean up
3626 		 * the chunk, advance our peer ack point, and check the next
3627 		 * chunk.
3628 		 */
3629 		if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3630 			/* advance PeerAckPoint goes forward */
3631 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3632 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3633 				a_adv = tp1;
3634 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3635 				/* No update but we do save the chk */
3636 				a_adv = tp1;
3637 			}
3638 		} else {
3639 			/*
3640 			 * If it is still in RESEND we can advance no
3641 			 * further
3642 			 */
3643 			break;
3644 		}
3645 	}
3646 	return (a_adv);
3647 }
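/*
 * Sketch of how sctp_try_advance_peer_ack_point() moves the
 * Advanced.Peer.Ack.Point (PR-SCTP): the point may only be advanced
 * across abandoned chunks; the first chunk that is still reliable or
 * pending stops the walk. The array below is a stand-in for the sent
 * queue and is purely illustrative.
 */
#if 0
static uint32_t
example_advance_ack_point(uint32_t ack_point, const int *abandoned, int n)
{
	int i;

	/* abandoned[i] != 0 means TSN ack_point + 1 + i was given up on */
	for (i = 0; i < n && abandoned[i]; i++)
		ack_point++;
	return (ack_point);	/* new Advanced.Peer.Ack.Point */
}
#endif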
3648 
3649 static int
3650 sctp_fs_audit(struct sctp_association *asoc)
3651 {
3652 	struct sctp_tmit_chunk *chk;
3653 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3654 	int entry_flight, entry_cnt, ret;
3655 
3656 	entry_flight = asoc->total_flight;
3657 	entry_cnt = asoc->total_flight_count;
3658 	ret = 0;
3659 
3660 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3661 		return (0);
3662 
3663 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3664 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3665 			printf("Chk TSN:%u size:%d inflight cnt:%d\n",
3666 			    chk->rec.data.TSN_seq,
3667 			    chk->send_size,
3668 			    chk->snd_count
3669 			    );
3670 			inflight++;
3671 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3672 			resend++;
3673 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3674 			inbetween++;
3675 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3676 			above++;
3677 		} else {
3678 			acked++;
3679 		}
3680 	}
3681 
3682 	if ((inflight > 0) || (inbetween > 0)) {
3683 #ifdef INVARIANTS
3684 		panic("Flight size-express incorrect?\n");
3685 #else
3686 		printf("asoc->total_flight:%d cnt:%d\n",
3687 		    entry_flight, entry_cnt);
3688 
3689 		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3690 		    inflight, inbetween, resend, above, acked);
3691 		ret = 1;
3692 #endif
3693 	}
3694 	return (ret);
3695 }
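/*
 * Note on sctp_fs_audit() above: it cross-checks the aggregate flight
 * counters against a fresh walk of the sent queue. Any chunk whose
 * sent state is below SCTP_DATAGRAM_RESEND should still be counted in
 * flight, so a non-zero inflight or inbetween count means the express
 * accounting has drifted and the caller rebuilds the counters.
 */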
3696 
3697 
3698 static void
3699 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3700     struct sctp_association *asoc,
3701     struct sctp_nets *net,
3702     struct sctp_tmit_chunk *tp1)
3703 {
3704 	tp1->window_probe = 0;
3705 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3706 		/* TSN's skipped; we do NOT move back. */
3707 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3708 		    tp1->whoTo->flight_size,
3709 		    tp1->book_size,
3710 		    (uintptr_t) tp1->whoTo,
3711 		    tp1->rec.data.TSN_seq);
3712 		return;
3713 	}
3714 	/* First setup this by shrinking flight */
3715 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3716 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3717 		    tp1);
3718 	}
3719 	sctp_flight_size_decrease(tp1);
3720 	sctp_total_flight_decrease(stcb, tp1);
3721 	/* Now mark for resend */
3722 	tp1->sent = SCTP_DATAGRAM_RESEND;
3723 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3724 
3725 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3726 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3727 		    tp1->whoTo->flight_size,
3728 		    tp1->book_size,
3729 		    (uintptr_t) tp1->whoTo,
3730 		    tp1->rec.data.TSN_seq);
3731 	}
3732 }
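/*
 * Note on sctp_window_probe_recovery() above: a window probe is a
 * single chunk sent while the peer advertises a zero rwnd. Once the
 * window reopens, the probe chunk is pulled back out of the flight
 * and marked SCTP_DATAGRAM_RESEND so it is retransmitted promptly,
 * unless it was already acked or abandoned, in which case the flight
 * must not be shrunk.
 */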
3733 
3734 void
3735 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3736     uint32_t rwnd, int *abort_now, int ecne_seen)
3737 {
3738 	struct sctp_nets *net;
3739 	struct sctp_association *asoc;
3740 	struct sctp_tmit_chunk *tp1, *tp2;
3741 	uint32_t old_rwnd;
3742 	int win_probe_recovery = 0;
3743 	int win_probe_recovered = 0;
3744 	int j, done_once = 0;
3745 	int rto_ok = 1;
3746 
3747 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3748 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3749 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3750 	}
3751 	SCTP_TCB_LOCK_ASSERT(stcb);
3752 #ifdef SCTP_ASOCLOG_OF_TSNS
3753 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3754 	stcb->asoc.cumack_log_at++;
3755 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3756 		stcb->asoc.cumack_log_at = 0;
3757 	}
3758 #endif
3759 	asoc = &stcb->asoc;
3760 	old_rwnd = asoc->peers_rwnd;
3761 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3762 		/* old ack */
3763 		return;
3764 	} else if (asoc->last_acked_seq == cumack) {
3765 		/* Window update sack */
3766 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3767 		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3768 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3769 			/* SWS sender side engages */
3770 			asoc->peers_rwnd = 0;
3771 		}
3772 		if (asoc->peers_rwnd > old_rwnd) {
3773 			goto again;
3774 		}
3775 		return;
3776 	}
3777 	/* First setup for CC stuff */
3778 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3779 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3780 			/* Drag along the window_tsn for cwr's */
3781 			net->cwr_window_tsn = cumack;
3782 		}
3783 		net->prev_cwnd = net->cwnd;
3784 		net->net_ack = 0;
3785 		net->net_ack2 = 0;
3786 
3787 		/*
3788 		 * CMT: Reset CUC and Fast recovery algo variables before
3789 		 * SACK processing
3790 		 */
3791 		net->new_pseudo_cumack = 0;
3792 		net->will_exit_fast_recovery = 0;
3793 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3794 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3795 		}
3796 	}
3797 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3798 		uint32_t send_s;
3799 
3800 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3801 			tp1 = TAILQ_LAST(&asoc->sent_queue,
3802 			    sctpchunk_listhead);
3803 			send_s = tp1->rec.data.TSN_seq + 1;
3804 		} else {
3805 			send_s = asoc->sending_seq;
3806 		}
3807 		if (SCTP_TSN_GE(cumack, send_s)) {
3808 #ifndef INVARIANTS
3809 			struct mbuf *oper;
3810 
3811 #endif
3812 #ifdef INVARIANTS
3813 			panic("Impossible sack 1");
3814 #else
3815 
3816 			*abort_now = 1;
3817 			/* XXX */
3818 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3819 			    0, M_DONTWAIT, 1, MT_DATA);
3820 			if (oper) {
3821 				struct sctp_paramhdr *ph;
3822 				uint32_t *ippp;
3823 
3824 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3825 				    sizeof(uint32_t);
3826 				ph = mtod(oper, struct sctp_paramhdr *);
3827 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3828 				ph->param_length = htons(SCTP_BUF_LEN(oper));
3829 				ippp = (uint32_t *) (ph + 1);
3830 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3831 			}
3832 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3833 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
3834 			return;
3835 #endif
3836 		}
3837 	}
3838 	asoc->this_sack_highest_gap = cumack;
3839 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3840 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3841 		    stcb->asoc.overall_error_count,
3842 		    0,
3843 		    SCTP_FROM_SCTP_INDATA,
3844 		    __LINE__);
3845 	}
3846 	stcb->asoc.overall_error_count = 0;
3847 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3848 		/* process the new consecutive TSN first */
3849 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3850 			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3851 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3852 					printf("Warning, an unsent is now acked?\n");
3853 				}
3854 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3855 					/*
3856 					 * If it is less than ACKED, it is
3857 					 * now no-longer in flight. Higher
3858 					 * values may occur during marking
3859 					 */
3860 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3861 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3862 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3863 							    tp1->whoTo->flight_size,
3864 							    tp1->book_size,
3865 							    (uintptr_t) tp1->whoTo,
3866 							    tp1->rec.data.TSN_seq);
3867 						}
3868 						sctp_flight_size_decrease(tp1);
3869 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3870 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3871 							    tp1);
3872 						}
3873 						/* sa_ignore NO_NULL_CHK */
3874 						sctp_total_flight_decrease(stcb, tp1);
3875 					}
3876 					tp1->whoTo->net_ack += tp1->send_size;
3877 					if (tp1->snd_count < 2) {
3878 						/*
3879 						 * True non-retransmited
3880 						 * True non-retransmitted
3881 						 */
3882 						tp1->whoTo->net_ack2 +=
3883 						    tp1->send_size;
3884 
3885 						/* update RTO too? */
3886 						if (tp1->do_rtt) {
3887 							if (rto_ok) {
3888 								tp1->whoTo->RTO =
3889 								/*
3890 								/* sa_ignore NO_NULL_CHK */
3895 								    asoc, tp1->whoTo,
3896 								    &tp1->sent_rcv_time,
3897 								    sctp_align_safe_nocopy,
3898 								    SCTP_RTT_FROM_DATA);
3899 								rto_ok = 0;
3900 							}
3901 							if (tp1->whoTo->rto_needed == 0) {
3902 								tp1->whoTo->rto_needed = 1;
3903 							}
3904 							tp1->do_rtt = 0;
3905 						}
3906 					}
3907 					/*
3908 					 * CMT: CUCv2 algorithm. From the
3909 					 * cumack'd TSNs, for each TSN being
3910 					 * acked for the first time, set the
3911 					 * following variables for the
3912 					 * corresp destination.
3913 					 * new_pseudo_cumack will trigger a
3914 					 * cwnd update.
3915 					 * find_(rtx_)pseudo_cumack will
3916 					 * trigger search for the next
3917 					 * expected (rtx-)pseudo-cumack.
3918 					 */
3919 					tp1->whoTo->new_pseudo_cumack = 1;
3920 					tp1->whoTo->find_pseudo_cumack = 1;
3921 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3922 
3923 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3924 						/* sa_ignore NO_NULL_CHK */
3925 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3926 					}
3927 				}
3928 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3929 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3930 				}
3931 				if (tp1->rec.data.chunk_was_revoked) {
3932 					/* deflate the cwnd */
3933 					tp1->whoTo->cwnd -= tp1->book_size;
3934 					tp1->rec.data.chunk_was_revoked = 0;
3935 				}
3936 				tp1->sent = SCTP_DATAGRAM_ACKED;
3937 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3938 				if (tp1->data) {
3939 					/* sa_ignore NO_NULL_CHK */
3940 					sctp_free_bufspace(stcb, asoc, tp1, 1);
3941 					sctp_m_freem(tp1->data);
3942 					tp1->data = NULL;
3943 				}
3944 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3945 					sctp_log_sack(asoc->last_acked_seq,
3946 					    cumack,
3947 					    tp1->rec.data.TSN_seq,
3948 					    0,
3949 					    0,
3950 					    SCTP_LOG_FREE_SENT);
3951 				}
3952 				asoc->sent_queue_cnt--;
3953 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3954 			} else {
3955 				break;
3956 			}
3957 		}
3958 
3959 	}
3960 	/* sa_ignore NO_NULL_CHK */
3961 	if (stcb->sctp_socket) {
3962 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3963 		struct socket *so;
3964 
3965 #endif
3966 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3967 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3968 			/* sa_ignore NO_NULL_CHK */
3969 			sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
3970 		}
3971 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3972 		so = SCTP_INP_SO(stcb->sctp_ep);
3973 		atomic_add_int(&stcb->asoc.refcnt, 1);
3974 		SCTP_TCB_UNLOCK(stcb);
3975 		SCTP_SOCKET_LOCK(so, 1);
3976 		SCTP_TCB_LOCK(stcb);
3977 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3978 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3979 			/* assoc was freed while we were unlocked */
3980 			SCTP_SOCKET_UNLOCK(so, 1);
3981 			return;
3982 		}
3983 #endif
3984 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
3985 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3986 		SCTP_SOCKET_UNLOCK(so, 1);
3987 #endif
3988 	} else {
3989 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3990 			sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
3991 		}
3992 	}
3993 
3994 	/* JRS - Use the congestion control given in the CC module */
3995 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0))
3996 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
3997 
3998 	asoc->last_acked_seq = cumack;
3999 
4000 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4001 		/* nothing left in-flight */
4002 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4003 			net->flight_size = 0;
4004 			net->partial_bytes_acked = 0;
4005 		}
4006 		asoc->total_flight = 0;
4007 		asoc->total_flight_count = 0;
4008 	}
4009 	/* RWND update */
4010 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4011 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4012 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4013 		/* SWS sender side engages */
4014 		asoc->peers_rwnd = 0;
4015 	}
4016 	if (asoc->peers_rwnd > old_rwnd) {
4017 		win_probe_recovery = 1;
4018 	}
4019 	/* Now assure a timer where data is queued at */
4020 again:
4021 	j = 0;
4022 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4023 		int to_ticks;
4024 
4025 		if (win_probe_recovery && (net->window_probe)) {
4026 			win_probe_recovered = 1;
4027 			/*
4028 			 * Find the first chunk that was used with a window
4029 			 * probe and clear its sent state
4030 			 */
4031 			/* sa_ignore FREED_MEMORY */
4032 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4033 				if (tp1->window_probe) {
4034 					/* move back to data send queue */
4035 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
4036 					break;
4037 				}
4038 			}
4039 		}
4040 		if (net->RTO == 0) {
4041 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4042 		} else {
4043 			to_ticks = MSEC_TO_TICKS(net->RTO);
4044 		}
4045 		if (net->flight_size) {
4046 			j++;
4047 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4048 			    sctp_timeout_handler, &net->rxt_timer);
4049 			if (net->window_probe) {
4050 				net->window_probe = 0;
4051 			}
4052 		} else {
4053 			if (net->window_probe) {
4054 				/*
4055 				 * In window probes we must assure a timer
4056 				 * is still running there
4057 				 */
4058 				net->window_probe = 0;
4059 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4060 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4061 					    sctp_timeout_handler, &net->rxt_timer);
4062 				}
4063 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4064 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4065 				    stcb, net,
4066 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4067 			}
4068 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4069 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4070 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4071 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4072 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4073 				}
4074 			}
4075 		}
4076 	}
4077 	if ((j == 0) &&
4078 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4079 	    (asoc->sent_queue_retran_cnt == 0) &&
4080 	    (win_probe_recovered == 0) &&
4081 	    (done_once == 0)) {
4082 		/*
4083 		 * huh, this should not happen unless all packets are
4084 		 * PR-SCTP and marked to skip of course.
4085 		 */
4086 		if (sctp_fs_audit(asoc)) {
4087 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4088 				net->flight_size = 0;
4089 			}
4090 			asoc->total_flight = 0;
4091 			asoc->total_flight_count = 0;
4092 			asoc->sent_queue_retran_cnt = 0;
4093 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4094 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4095 					sctp_flight_size_increase(tp1);
4096 					sctp_total_flight_increase(stcb, tp1);
4097 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4098 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4099 				}
4100 			}
4101 		}
4102 		done_once = 1;
4103 		goto again;
4104 	}
4105 	/**********************************/
4106 	/* Now what about shutdown issues */
4107 	/**********************************/
4108 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4109 		/* nothing left on sendqueue.. consider done */
4110 		/* clean up */
4111 		if ((asoc->stream_queue_cnt == 1) &&
4112 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4113 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4114 		    (asoc->locked_on_sending)
4115 		    ) {
4116 			struct sctp_stream_queue_pending *sp;
4117 
4118 			/*
4119 			 * We may be in a state where we got everything across
4120 			 * but cannot write more due to a shutdown... we abort
4121 			 * since the user did not indicate EOR in this case.
4122 			 * The sp will be cleaned up during free of the asoc.
4123 			 */
4124 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4125 			    sctp_streamhead);
4126 			if ((sp) && (sp->length == 0)) {
4127 				/* Let cleanup code purge it */
4128 				if (sp->msg_is_complete) {
4129 					asoc->stream_queue_cnt--;
4130 				} else {
4131 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4132 					asoc->locked_on_sending = NULL;
4133 					asoc->stream_queue_cnt--;
4134 				}
4135 			}
4136 		}
4137 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4138 		    (asoc->stream_queue_cnt == 0)) {
4139 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4140 				/* Need to abort here */
4141 				struct mbuf *oper;
4142 
4143 		abort_out_now:
4144 				*abort_now = 1;
4145 				/* XXX */
4146 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4147 				    0, M_DONTWAIT, 1, MT_DATA);
4148 				if (oper) {
4149 					struct sctp_paramhdr *ph;
4150 					uint32_t *ippp;
4151 
4152 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4153 					    sizeof(uint32_t);
4154 					ph = mtod(oper, struct sctp_paramhdr *);
4155 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4156 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4157 					ippp = (uint32_t *) (ph + 1);
4158 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4159 				}
4160 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4161 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4162 			} else {
4163 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4164 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4165 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4166 				}
4167 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4168 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4169 				sctp_stop_timers_for_shutdown(stcb);
4170 				sctp_send_shutdown(stcb,
4171 				    stcb->asoc.primary_destination);
4172 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4173 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4174 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4175 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4176 			}
4177 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4178 		    (asoc->stream_queue_cnt == 0)) {
4179 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4180 				goto abort_out_now;
4181 			}
4182 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4183 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4184 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4185 			sctp_send_shutdown_ack(stcb,
4186 			    stcb->asoc.primary_destination);
4187 			sctp_stop_timers_for_shutdown(stcb);
4188 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4189 			    stcb->sctp_ep, stcb, asoc->primary_destination);
4190 		}
4191 	}
4192 	/*********************************************/
4193 	/* Here we perform PR-SCTP procedures        */
4194 	/* (section 4.2)                             */
4195 	/*********************************************/
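	/*
	 * The C1/C3 labels below follow the rule names used in the PR-SCTP
	 * specification: C1 updates the Advanced.Peer.Ack.Point from the
	 * cumulative ack, and C3 decides whether a FORWARD TSN chunk must
	 * be sent to the peer.
	 */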
4196 	/* C1. update advancedPeerAckPoint */
4197 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4198 		asoc->advanced_peer_ack_point = cumack;
4199 	}
4200 	/* PR-Sctp issues need to be addressed too */
4201 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4202 		struct sctp_tmit_chunk *lchk;
4203 		uint32_t old_adv_peer_ack_point;
4204 
4205 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4206 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4207 		/* C3. See if we need to send a Fwd-TSN */
4208 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4209 			/*
4210 			 * ISSUE with ECN, see FWD-TSN processing.
4211 			 */
4212 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4213 				send_forward_tsn(stcb, asoc);
4214 			} else if (lchk) {
4215 				/* try to FR fwd-tsn's that get lost too */
4216 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4217 					send_forward_tsn(stcb, asoc);
4218 				}
4219 			}
4220 		}
4221 		if (lchk) {
4222 			/* Assure a timer is up */
4223 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4224 			    stcb->sctp_ep, stcb, lchk->whoTo);
4225 		}
4226 	}
4227 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4228 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4229 		    rwnd,
4230 		    stcb->asoc.peers_rwnd,
4231 		    stcb->asoc.total_flight,
4232 		    stcb->asoc.total_output_queue_size);
4233 	}
4234 }
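/*
 * Sketch of the peer rwnd computation used in
 * sctp_express_handle_sack() above (hypothetical helper standing in
 * for sctp_sbspace_sub() plus the SWS clamp): the advertised window
 * is reduced by the bytes in flight plus a per-chunk overhead
 * allowance, and small windows are clamped to zero so the sender does
 * not dribble data into a silly window.
 */
#if 0
static uint32_t
example_peer_rwnd(uint32_t rwnd, uint32_t flight, uint32_t flight_cnt,
    uint32_t chunk_oh, uint32_t sws_sender)
{
	uint32_t in_flight = flight + flight_cnt * chunk_oh;
	uint32_t w = (rwnd > in_flight) ? (rwnd - in_flight) : 0;

	return ((w < sws_sender) ? 0 : w);	/* SWS sender side engages */
}
#endif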
4235 
4236 void
4237 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4238     struct sctp_tcb *stcb, struct sctp_nets *net_from,
4239     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4240     int *abort_now, uint8_t flags,
4241     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4242 {
4243 	struct sctp_association *asoc;
4244 	struct sctp_tmit_chunk *tp1, *tp2;
4245 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4246 	uint32_t sav_cum_ack;
4247 	uint16_t wake_him = 0;
4248 	uint32_t send_s = 0;
4249 	long j;
4250 	int accum_moved = 0;
4251 	int will_exit_fast_recovery = 0;
4252 	uint32_t a_rwnd, old_rwnd;
4253 	int win_probe_recovery = 0;
4254 	int win_probe_recovered = 0;
4255 	struct sctp_nets *net = NULL;
4256 	int ecn_seg_sums = 0;
4257 	int done_once;
4258 	int rto_ok = 1;
4259 	uint8_t reneged_all = 0;
4260 	uint8_t cmt_dac_flag;
4261 
4262 	/*
4263 	 * we take any chance we can to service our queues since we cannot
4264 	 * get awoken when the socket is read from :<
4265 	 */
4266 	/*
4267 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4268 	 * old sack; if so, discard. 2) If there is nothing left in the send
4269 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4270 	 * too; update any rwnd change, verify no timers are running, then
4271 	 * return. 3) Process any new consecutive data, i.e. the cum-ack
4272 	 * moved; process these first and note that it moved. 4) Process any
4273 	 * sack blocks. 5) Drop any acked chunks from the queue. 6) Check for
4274 	 * any revoked blocks and mark them. 7) Update the cwnd. 8) If nothing
4275 	 * is left, sync up flight sizes and things, stop all timers and also
4276 	 * check for the shutdown_pending state. If so then go ahead and send
4277 	 * off the shutdown. If in shutdown recv, send off the shutdown-ack
4278 	 * and start that timer; return. 9) Strike any non-acked things and do
4279 	 * the FR procedure if needed, being sure to set the FR flag. 10) Do
4280 	 * pr-sctp procedures. 11) Apply any FR penalties. 12) Assure we will
4281 	 * SACK if in shutdown_recv state.
4282 	 */
4283 	SCTP_TCB_LOCK_ASSERT(stcb);
4284 	/* CMT DAC algo */
4285 	this_sack_lowest_newack = 0;
4286 	j = 0;
4287 	SCTP_STAT_INCR(sctps_slowpath_sack);
4288 	last_tsn = cum_ack;
4289 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4290 #ifdef SCTP_ASOCLOG_OF_TSNS
4291 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4292 	stcb->asoc.cumack_log_at++;
4293 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4294 		stcb->asoc.cumack_log_at = 0;
4295 	}
4296 #endif
4297 	a_rwnd = rwnd;
4298 
4299 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4300 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4301 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4302 	}
4303 	old_rwnd = stcb->asoc.peers_rwnd;
4304 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4305 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4306 		    stcb->asoc.overall_error_count,
4307 		    0,
4308 		    SCTP_FROM_SCTP_INDATA,
4309 		    __LINE__);
4310 	}
4311 	stcb->asoc.overall_error_count = 0;
4312 	asoc = &stcb->asoc;
4313 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4314 		sctp_log_sack(asoc->last_acked_seq,
4315 		    cum_ack,
4316 		    0,
4317 		    num_seg,
4318 		    num_dup,
4319 		    SCTP_LOG_NEW_SACK);
4320 	}
4321 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
4322 		uint16_t i;
4323 		uint32_t *dupdata, dblock;
4324 
4325 		for (i = 0; i < num_dup; i++) {
4326 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4327 			    sizeof(uint32_t), (uint8_t *) & dblock);
4328 			if (dupdata == NULL) {
4329 				break;
4330 			}
4331 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4332 		}
4333 	}
4334 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4335 		/* reality check */
4336 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4337 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4338 			    sctpchunk_listhead);
4339 			send_s = tp1->rec.data.TSN_seq + 1;
4340 		} else {
4341 			tp1 = NULL;
4342 			send_s = asoc->sending_seq;
4343 		}
4344 		if (SCTP_TSN_GE(cum_ack, send_s)) {
4345 			struct mbuf *oper;
4346 
4347 			/*
4348 			 * no way, we have not even sent this TSN out yet.
4349 			 * Peer is hopelessly messed up with us.
4350 			 */
4351 			printf("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4352 			    cum_ack, send_s);
4353 			if (tp1) {
4354 				printf("Got send_s from tsn:%x + 1 of tp1:%p\n",
4355 				    tp1->rec.data.TSN_seq, tp1);
4356 			}
4357 	hopeless_peer:
4358 			*abort_now = 1;
4359 			/* XXX */
4360 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4361 			    0, M_DONTWAIT, 1, MT_DATA);
4362 			if (oper) {
4363 				struct sctp_paramhdr *ph;
4364 				uint32_t *ippp;
4365 
4366 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4367 				    sizeof(uint32_t);
4368 				ph = mtod(oper, struct sctp_paramhdr *);
4369 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4370 				ph->param_length = htons(SCTP_BUF_LEN(oper));
4371 				ippp = (uint32_t *) (ph + 1);
4372 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4373 			}
4374 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4375 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4376 			return;
4377 		}
4378 	}
4379 	/**********************/
4380 	/* 1) check the range */
4381 	/**********************/
4382 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4383 		/* acking something behind */
4384 		return;
4385 	}
4386 	sav_cum_ack = asoc->last_acked_seq;
4387 
4388 	/* update the Rwnd of the peer */
4389 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4390 	    TAILQ_EMPTY(&asoc->send_queue) &&
4391 	    (asoc->stream_queue_cnt == 0)) {
4392 		/* nothing left on send/sent and strmq */
4393 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4394 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4395 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4396 		}
4397 		asoc->peers_rwnd = a_rwnd;
4398 		if (asoc->sent_queue_retran_cnt) {
4399 			asoc->sent_queue_retran_cnt = 0;
4400 		}
4401 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4402 			/* SWS sender side engages */
4403 			asoc->peers_rwnd = 0;
4404 		}
4405 		/* stop any timers */
4406 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4407 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4408 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4409 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4410 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4411 					SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
4412 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4413 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4414 				}
4415 			}
4416 			net->partial_bytes_acked = 0;
4417 			net->flight_size = 0;
4418 		}
4419 		asoc->total_flight = 0;
4420 		asoc->total_flight_count = 0;
4421 		return;
4422 	}
4423 	/*
4424 	 * We init net_ack and net_ack2 to 0. These are used to track two
4425 	 * things: net_ack tracks the total byte count acked, and net_ack2
4426 	 * tracks the total bytes acked that are unambiguous, i.e. were
4427 	 * never retransmitted. We track these on a per destination
4428 	 * address basis.
4429 	 */
4430 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4431 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4432 			/* Drag along the window_tsn for cwr's */
4433 			net->cwr_window_tsn = cum_ack;
4434 		}
4435 		net->prev_cwnd = net->cwnd;
4436 		net->net_ack = 0;
4437 		net->net_ack2 = 0;
4438 
4439 		/*
4440 		 * CMT: Reset CUC and Fast recovery algo variables before
4441 		 * SACK processing
4442 		 */
4443 		net->new_pseudo_cumack = 0;
4444 		net->will_exit_fast_recovery = 0;
4445 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4446 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4447 		}
4448 	}
4449 	/* process the new consecutive TSN first */
4450 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4451 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4452 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4453 				accum_moved = 1;
4454 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4455 					/*
4456 					 * If it is less than ACKED, it is
4457 					 * now no-longer in flight. Higher
4458 					 * values may occur during marking
4459 					 */
4460 					if ((tp1->whoTo->dest_state &
4461 					    SCTP_ADDR_UNCONFIRMED) &&
4462 					    (tp1->snd_count < 2)) {
4463 						/*
4464 						 * If there was no retran
4465 						 * If there was no retrans
4466 						 * and the address is
4467 						 * unconfirmed and we sent
4468 						 * there and are now
4469 						 * sacked... it's confirmed;
4470 						 * mark it so.
4471 						tp1->whoTo->dest_state &=
4472 						    ~SCTP_ADDR_UNCONFIRMED;
4473 					}
4474 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4475 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4476 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4477 							    tp1->whoTo->flight_size,
4478 							    tp1->book_size,
4479 							    (uintptr_t) tp1->whoTo,
4480 							    tp1->rec.data.TSN_seq);
4481 						}
4482 						sctp_flight_size_decrease(tp1);
4483 						sctp_total_flight_decrease(stcb, tp1);
4484 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4485 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4486 							    tp1);
4487 						}
4488 					}
4489 					tp1->whoTo->net_ack += tp1->send_size;
4490 
4491 					/* CMT SFR and DAC algos */
4492 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4493 					tp1->whoTo->saw_newack = 1;
4494 
4495 					if (tp1->snd_count < 2) {
4496 						/*
4497 						 * True non-retransmitted
4498 						 * chunk
4499 						 */
4500 						tp1->whoTo->net_ack2 +=
4501 						    tp1->send_size;
4502 
4503 						/* update RTO too? */
4504 						if (tp1->do_rtt) {
4505 							if (rto_ok) {
4506 								tp1->whoTo->RTO =
4507 								    sctp_calculate_rto(stcb,
4508 								    asoc, tp1->whoTo,
4509 								    &tp1->sent_rcv_time,
4510 								    sctp_align_safe_nocopy,
4511 								    SCTP_RTT_FROM_DATA);
4512 								rto_ok = 0;
4513 							}
4514 							if (tp1->whoTo->rto_needed == 0) {
4515 								tp1->whoTo->rto_needed = 1;
4516 							}
4517 							tp1->do_rtt = 0;
4518 						}
4519 					}
4520 					/*
4521 					 * CMT: CUCv2 algorithm. From the
4522 					 * cumack'd TSNs, for each TSN being
4523 					 * acked for the first time, set the
4524 					 * following variables for the
4525 					 * corresp destination.
4526 					 * new_pseudo_cumack will trigger a
4527 					 * cwnd update.
4528 					 * find_(rtx_)pseudo_cumack will
4529 					 * trigger search for the next
4530 					 * expected (rtx-)pseudo-cumack.
4531 					 */
4532 					tp1->whoTo->new_pseudo_cumack = 1;
4533 					tp1->whoTo->find_pseudo_cumack = 1;
4534 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4535 
4536 
4537 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4538 						sctp_log_sack(asoc->last_acked_seq,
4539 						    cum_ack,
4540 						    tp1->rec.data.TSN_seq,
4541 						    0,
4542 						    0,
4543 						    SCTP_LOG_TSN_ACKED);
4544 					}
4545 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4546 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4547 					}
4548 				}
4549 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4550 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4551 #ifdef SCTP_AUDITING_ENABLED
4552 					sctp_audit_log(0xB3,
4553 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4554 #endif
4555 				}
4556 				if (tp1->rec.data.chunk_was_revoked) {
4557 					/* deflate the cwnd */
4558 					tp1->whoTo->cwnd -= tp1->book_size;
4559 					tp1->rec.data.chunk_was_revoked = 0;
4560 				}
4561 				tp1->sent = SCTP_DATAGRAM_ACKED;
4562 			}
4563 		} else {
4564 			break;
4565 		}
4566 	}
4567 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4568 	/* always set this up to cum-ack */
4569 	asoc->this_sack_highest_gap = last_tsn;
4570 
4571 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4572 
4573 		/*
4574 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4575 		 * to be greater than the cumack. Also reset saw_newack to 0
4576 		 * for all dests.
4577 		 */
4578 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4579 			net->saw_newack = 0;
4580 			net->this_sack_highest_newack = last_tsn;
4581 		}
4582 
4583 		/*
4584 		 * thisSackHighestGap will increase while handling NEW
4585 		 * segments this_sack_highest_newack will increase while
4586 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4587 		 * used for CMT DAC algo. saw_newack will also change.
4588 		 */
4589 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4590 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4591 		    num_seg, num_nr_seg, &ecn_seg_sums,
4592 		    &rto_ok)) {
4593 			wake_him++;
4594 		}
4595 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4596 			/*
4597 			 * validate the biggest_tsn_acked in the gap acks if
4598 			 * strict adherence is wanted.
4599 			 */
4600 			if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4601 				/*
4602 				 * peer is either confused or we are under
4603 				 * attack. We must abort.
4604 				 */
4605 				printf("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4606 				    biggest_tsn_acked,
4607 				    send_s);
4608 
4609 				goto hopeless_peer;
4610 			}
4611 		}
4612 	}
4613 	/*******************************************/
4614 	/* cancel ALL T3-send timer if accum moved */
4615 	/*******************************************/
4616 	if (asoc->sctp_cmt_on_off > 0) {
4617 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4618 			if (net->new_pseudo_cumack)
4619 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4620 				    stcb, net,
4621 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4622 
4623 		}
4624 	} else {
4625 		if (accum_moved) {
4626 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4627 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4628 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4629 			}
4630 		}
4631 	}
4632 	/********************************************/
4633 	/* drop the acked chunks from the sentqueue */
4634 	/********************************************/
4635 	asoc->last_acked_seq = cum_ack;
4636 
4637 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4638 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4639 			break;
4640 		}
4641 		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4642 			/* no more sent on list */
4643 			printf("Warning, tp1->sent == %d and it's now acked?\n",
4644 			    tp1->sent);
4645 		}
4646 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4647 		if (tp1->pr_sctp_on) {
4648 			if (asoc->pr_sctp_cnt != 0)
4649 				asoc->pr_sctp_cnt--;
4650 		}
4651 		asoc->sent_queue_cnt--;
4652 		if (tp1->data) {
4653 			/* sa_ignore NO_NULL_CHK */
4654 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4655 			sctp_m_freem(tp1->data);
4656 			tp1->data = NULL;
4657 			if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4658 				asoc->sent_queue_cnt_removeable--;
4659 			}
4660 		}
4661 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4662 			sctp_log_sack(asoc->last_acked_seq,
4663 			    cum_ack,
4664 			    tp1->rec.data.TSN_seq,
4665 			    0,
4666 			    0,
4667 			    SCTP_LOG_FREE_SENT);
4668 		}
4669 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4670 		wake_him++;
4671 	}
4672 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4673 #ifdef INVARIANTS
4674 		panic("Warning flight size is positive and should be 0");
4675 #else
4676 		SCTP_PRINTF("Warning: flight size should be 0 but is %d\n",
4677 		    asoc->total_flight);
4678 #endif
4679 		asoc->total_flight = 0;
4680 	}
4681 	/* sa_ignore NO_NULL_CHK */
4682 	if ((wake_him) && (stcb->sctp_socket)) {
4683 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4684 		struct socket *so;
4685 
4686 #endif
4687 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4688 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4689 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
4690 		}
4691 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4692 		so = SCTP_INP_SO(stcb->sctp_ep);
4693 		atomic_add_int(&stcb->asoc.refcnt, 1);
4694 		SCTP_TCB_UNLOCK(stcb);
4695 		SCTP_SOCKET_LOCK(so, 1);
4696 		SCTP_TCB_LOCK(stcb);
4697 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4698 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4699 			/* assoc was freed while we were unlocked */
4700 			SCTP_SOCKET_UNLOCK(so, 1);
4701 			return;
4702 		}
4703 #endif
4704 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4705 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4706 		SCTP_SOCKET_UNLOCK(so, 1);
4707 #endif
4708 	} else {
4709 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4710 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
4711 		}
4712 	}
4713 
4714 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4715 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4716 			/* Setup so we will exit RFC2582 fast recovery */
4717 			will_exit_fast_recovery = 1;
4718 		}
4719 	}
4720 	/*
4721 	 * Check for revoked fragments:
4722 	 *
4723 	 * If the previous SACK had no frags then we can't have any revoked.
4724 	 * If the previous SACK had frags then: if we now have frags (aka
4725 	 * num_seg > 0) call sctp_check_for_revoked() to tell if the peer
4726 	 * revoked some of them; else the peer revoked all ACKED fragments,
4727 	 * since we had some before and now we have NONE.
4728 	 */
4729 
4730 	if (num_seg) {
4731 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4732 		asoc->saw_sack_with_frags = 1;
4733 	} else if (asoc->saw_sack_with_frags) {
4734 		int cnt_revoked = 0;
4735 
4736 		/* Peer revoked all dg's marked or acked */
4737 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4738 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4739 				tp1->sent = SCTP_DATAGRAM_SENT;
4740 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4741 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4742 					    tp1->whoTo->flight_size,
4743 					    tp1->book_size,
4744 					    (uintptr_t) tp1->whoTo,
4745 					    tp1->rec.data.TSN_seq);
4746 				}
4747 				sctp_flight_size_increase(tp1);
4748 				sctp_total_flight_increase(stcb, tp1);
4749 				tp1->rec.data.chunk_was_revoked = 1;
4750 				/*
4751 				 * To ensure that this increase in
4752 				 * flightsize, which is artificial, does not
4753 				 * throttle the sender, we also increase the
4754 				 * cwnd artificially.
4755 				 */
4756 				tp1->whoTo->cwnd += tp1->book_size;
4757 				cnt_revoked++;
4758 			}
4759 		}
4760 		if (cnt_revoked) {
4761 			reneged_all = 1;
4762 		}
4763 		asoc->saw_sack_with_frags = 0;
4764 	}
4765 	if (num_nr_seg > 0)
4766 		asoc->saw_sack_with_nr_frags = 1;
4767 	else
4768 		asoc->saw_sack_with_nr_frags = 0;
4769 
4770 	/* JRS - Use the congestion control given in the CC module */
4771 	if (ecne_seen == 0)
4772 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4773 
4774 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4775 		/* nothing left in-flight */
4776 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4777 			/* stop all timers */
4778 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4779 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4780 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4781 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4782 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4783 				}
4784 			}
4785 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4786 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4787 			net->flight_size = 0;
4788 			net->partial_bytes_acked = 0;
4789 		}
4790 		asoc->total_flight = 0;
4791 		asoc->total_flight_count = 0;
4792 	}
4793 	/**********************************/
4794 	/* Now what about shutdown issues */
4795 	/**********************************/
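	/*-
	 * In outline: once both queues are empty, SHUTDOWN_PENDING moves us
	 * to SHUTDOWN_SENT (a SHUTDOWN chunk is sent and its timers are
	 * started), SHUTDOWN_RECEIVED moves us to SHUTDOWN_ACK_SENT (a
	 * SHUTDOWN-ACK is sent), and a partially sent message left behind
	 * (SCTP_STATE_PARTIAL_MSG_LEFT) forces an abort instead.
	 */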
4796 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4797 		/* nothing left on the send queue... consider it done */
4798 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4799 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4800 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4801 		}
4802 		asoc->peers_rwnd = a_rwnd;
4803 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4804 			/* SWS sender side engages */
4805 			asoc->peers_rwnd = 0;
4806 		}
4807 		/* clean up */
4808 		if ((asoc->stream_queue_cnt == 1) &&
4809 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4810 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4811 		    (asoc->locked_on_sending)
4812 		    ) {
4813 			struct sctp_stream_queue_pending *sp;
4814 
4815 			/*
4816 			 * We may be in a state where everything has gotten
4817 			 * across but no more can be written because of a
4818 			 * shutdown. We abort since the user did not indicate EOR.
4819 			 */
4820 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4821 			    sctp_streamhead);
4822 			if ((sp) && (sp->length == 0)) {
4823 				asoc->locked_on_sending = NULL;
4824 				if (sp->msg_is_complete) {
4825 					asoc->stream_queue_cnt--;
4826 				} else {
4827 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4828 					asoc->stream_queue_cnt--;
4829 				}
4830 			}
4831 		}
4832 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4833 		    (asoc->stream_queue_cnt == 0)) {
4834 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4835 				/* Need to abort here */
4836 				struct mbuf *oper;
4837 
4838 		abort_out_now:
4839 				*abort_now = 1;
4840 				/* XXX */
4841 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4842 				    0, M_DONTWAIT, 1, MT_DATA);
4843 				if (oper) {
4844 					struct sctp_paramhdr *ph;
4845 					uint32_t *ippp;
4846 
4847 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4848 					    sizeof(uint32_t);
4849 					ph = mtod(oper, struct sctp_paramhdr *);
4850 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4851 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4852 					ippp = (uint32_t *) (ph + 1);
4853 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4854 				}
4855 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4856 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4857 				return;
4858 			} else {
4859 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4860 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4861 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4862 				}
4863 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4864 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4865 				sctp_stop_timers_for_shutdown(stcb);
4866 				sctp_send_shutdown(stcb,
4867 				    stcb->asoc.primary_destination);
4868 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4869 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4870 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4871 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4872 			}
4873 			return;
4874 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4875 		    (asoc->stream_queue_cnt == 0)) {
4876 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4877 				goto abort_out_now;
4878 			}
4879 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4880 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4881 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4882 			sctp_send_shutdown_ack(stcb,
4883 			    stcb->asoc.primary_destination);
4884 			sctp_stop_timers_for_shutdown(stcb);
4885 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4886 			    stcb->sctp_ep, stcb, asoc->primary_destination);
4887 			return;
4888 		}
4889 	}
4890 	/*
4891 	 * Now here we are going to recycle net_ack for a different use...
4892 	 * HEADS UP.
4893 	 */
4894 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4895 		net->net_ack = 0;
4896 	}
4897 
4898 	/*
4899 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4900 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4901 	 * automatically ensure that.
4902 	 */
4903 	if ((asoc->sctp_cmt_on_off > 0) &&
4904 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4905 	    (cmt_dac_flag == 0)) {
4906 		this_sack_lowest_newack = cum_ack;
4907 	}
4908 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4909 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4910 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4911 	}
4912 	/* JRS - Use the congestion control given in the CC module */
4913 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4914 
4915 	/* Now are we exiting loss recovery ? */
4916 	if (will_exit_fast_recovery) {
4917 		/* Ok, we must exit fast recovery */
4918 		asoc->fast_retran_loss_recovery = 0;
4919 	}
4920 	if ((asoc->sat_t3_loss_recovery) &&
4921 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4922 		/* end satellite t3 loss recovery */
4923 		asoc->sat_t3_loss_recovery = 0;
4924 	}
4925 	/*
4926 	 * CMT Fast recovery
4927 	 */
4928 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4929 		if (net->will_exit_fast_recovery) {
4930 			/* Ok, we must exit fast recovery */
4931 			net->fast_retran_loss_recovery = 0;
4932 		}
4933 	}
4934 
4935 	/* Adjust and set the new rwnd value */
4936 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4937 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4938 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4939 	}
4940 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4941 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4942 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4943 		/* SWS sender side engages */
4944 		asoc->peers_rwnd = 0;
4945 	}
4946 	if (asoc->peers_rwnd > old_rwnd) {
4947 		win_probe_recovery = 1;
4948 	}
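	/*
	 * If the peer's window has reopened (rwnd grew past old_rwnd), any
	 * chunk that was sent as a window probe is recovered in the loop
	 * below via sctp_window_probe_recovery() and treated as ordinary
	 * unsent data again.
	 */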
4949 	/*
4950 	 * Now we must setup so we have a timer up for anyone with
4951 	 * outstanding data.
4952 	 */
4953 	done_once = 0;
4954 again:
4955 	j = 0;
4956 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4957 		if (win_probe_recovery && (net->window_probe)) {
4958 			win_probe_recovered = 1;
4959 			/*-
4960 			 * Find the first chunk that was used for a
4961 			 * window probe and clear the event. Put it
4962 			 * back into the send queue as if it had
4963 			 * not been sent.
4964 			 */
4965 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4966 				if (tp1->window_probe) {
4967 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
4968 					break;
4969 				}
4970 			}
4971 		}
4972 		if (net->flight_size) {
4973 			j++;
4974 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4975 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4976 				    stcb->sctp_ep, stcb, net);
4977 			}
4978 			if (net->window_probe) {
4979 				net->window_probe = 0;
4980 			}
4981 		} else {
4982 			if (net->window_probe) {
4983 				/*
4984 				 * For window probes we must ensure a timer
4985 				 * is still running there.
4986 				 */
4987 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4988 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4989 					    stcb->sctp_ep, stcb, net);
4990 
4991 				}
4992 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4993 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4994 				    stcb, net,
4995 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4996 			}
4997 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4998 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4999 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5000 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5001 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
5002 				}
5003 			}
5004 		}
5005 	}
5006 	if ((j == 0) &&
5007 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5008 	    (asoc->sent_queue_retran_cnt == 0) &&
5009 	    (win_probe_recovered == 0) &&
5010 	    (done_once == 0)) {
5011 		/*
5012 		 * This should not happen unless, of course, all packets
5013 		 * are PR-SCTP and marked to be skipped.
5014 		 */
5015 		if (sctp_fs_audit(asoc)) {
5016 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5017 				net->flight_size = 0;
5018 			}
5019 			asoc->total_flight = 0;
5020 			asoc->total_flight_count = 0;
5021 			asoc->sent_queue_retran_cnt = 0;
5022 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5023 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5024 					sctp_flight_size_increase(tp1);
5025 					sctp_total_flight_increase(stcb, tp1);
5026 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5027 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5028 				}
5029 			}
5030 		}
5031 		done_once = 1;
5032 		goto again;
5033 	}
5034 	/*********************************************/
5035 	/* Here we perform PR-SCTP procedures        */
5036 	/* (section 4.2)                             */
5037 	/*********************************************/
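	/*-
	 * A worked example with hypothetical TSNs: with cum_ack = 100 and
	 * abandoned PR-SCTP chunks 101-103 on the sent queue, C1 raises
	 * advancedPeerAckPoint to 100, C2 advances it to 103, and C3 then
	 * sends FWD-TSN(103) so the peer can skip the abandoned data.
	 */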
5038 	/* C1. update advancedPeerAckPoint */
5039 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5040 		asoc->advanced_peer_ack_point = cum_ack;
5041 	}
5042 	/* C2. try to further move advancedPeerAckPoint ahead */
5043 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5044 		struct sctp_tmit_chunk *lchk;
5045 		uint32_t old_adv_peer_ack_point;
5046 
5047 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5048 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5049 		/* C3. See if we need to send a Fwd-TSN */
5050 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5051 			/*
5052 			 * ISSUE with ECN, see FWD-TSN processing.
5053 			 */
5054 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5055 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5056 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5057 				    old_adv_peer_ack_point);
5058 			}
5059 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5060 				send_forward_tsn(stcb, asoc);
5061 			} else if (lchk) {
5062 				/* try to FR fwd-tsn's that get lost too */
5063 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5064 					send_forward_tsn(stcb, asoc);
5065 				}
5066 			}
5067 		}
5068 		if (lchk) {
5069 			/* Assure a timer is up */
5070 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5071 			    stcb->sctp_ep, stcb, lchk->whoTo);
5072 		}
5073 	}
5074 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5075 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5076 		    a_rwnd,
5077 		    stcb->asoc.peers_rwnd,
5078 		    stcb->asoc.total_flight,
5079 		    stcb->asoc.total_output_queue_size);
5080 	}
5081 }
5082 
5083 void
5084 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5085     struct sctp_nets *netp, int *abort_flag)
5086 {
5087 	/* Copy cum-ack */
5088 	uint32_t cum_ack, a_rwnd;
5089 
5090 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5091 	/* Arrange so a_rwnd does NOT change; adding total_flight offsets its later subtraction */
5092 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5093 
5094 	/* Now call the express sack handling */
5095 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5096 }
5097 
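/*
 * Deliver queued ordered data on one inbound stream after a FWD-TSN has
 * skipped part of it: first everything at or before the skipped-to
 * sequence number, then any consecutive run that has become in-order.
 */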
5098 static void
5099 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5100     struct sctp_stream_in *strmin)
5101 {
5102 	struct sctp_queued_to_read *ctl, *nctl;
5103 	struct sctp_association *asoc;
5104 	uint16_t tt;
5105 
5106 	asoc = &stcb->asoc;
5107 	tt = strmin->last_sequence_delivered;
5108 	/*
5109 	 * First deliver anything with a stream sequence number at or
5110 	 * before the one that came in.
5111 	 */
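	/*
	 * For example, with tt = 5 and SSNs 4, 5, 6 and 8 queued: this
	 * first pass delivers 4 and 5, the second pass below then delivers
	 * 6, and SSN 8 stays queued until 7 arrives or is skipped.
	 */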
5112 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5113 		if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
5114 			/* this is deliverable now */
5115 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5116 			/* subtract pending on streams */
5117 			asoc->size_on_all_streams -= ctl->length;
5118 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5119 			/* deliver it to at least the delivery-q */
5120 			if (stcb->sctp_socket) {
5121 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5122 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5123 				    ctl,
5124 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5125 			}
5126 		} else {
5127 			/* no more delivery now. */
5128 			break;
5129 		}
5130 	}
5131 	/*
5132 	 * Now we must deliver, in the normal way, anything in the queue
5133 	 * that has become ready.
5134 	 */
5135 	tt = strmin->last_sequence_delivered + 1;
5136 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5137 		if (tt == ctl->sinfo_ssn) {
5138 			/* this is deliverable now */
5139 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5140 			/* subtract pending on streams */
5141 			asoc->size_on_all_streams -= ctl->length;
5142 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5143 			/* deliver it to at least the delivery-q */
5144 			strmin->last_sequence_delivered = ctl->sinfo_ssn;
5145 			if (stcb->sctp_socket) {
5146 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5147 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5148 				    ctl,
5149 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5150 
5151 			}
5152 			tt = strmin->last_sequence_delivered + 1;
5153 		} else {
5154 			break;
5155 		}
5156 	}
5157 }
5158 
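/*
 * Toss every fragment on the reassembly queue that belongs to the given
 * stream and stream sequence number, fixing up the reassembly accounting
 * and, for ordered data, pulling last_sequence_delivered forward.
 */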
5159 static void
5160 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5161     struct sctp_association *asoc,
5162     uint16_t stream, uint16_t seq)
5163 {
5164 	struct sctp_tmit_chunk *chk, *nchk;
5165 
5166 	/* For each one on here see if we need to toss it */
5167 	/*
5168 	 * For now, large messages held on the reasmqueue that are complete
5169 	 * will be tossed too. In theory we could do more work: spin through
5170 	 * the queue, stop after dumping one message (i.e., on seeing the
5171 	 * start of a new message at the head) and call the delivery
5172 	 * function to see whether it can be delivered. But for now we just
5173 	 * dump everything on the queue.
5174 	 */
5175 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5176 		/*
5177 		 * Do not toss it if on a different stream or marked for
5178 		 * unordered delivery in which case the stream sequence
5179 		 * number has no meaning.
5180 		 */
5181 		if ((chk->rec.data.stream_number != stream) ||
5182 		    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5183 			continue;
5184 		}
5185 		if (chk->rec.data.stream_seq == seq) {
5186 			/* It needs to be tossed */
5187 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5188 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5189 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5190 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5191 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5192 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5193 			}
5194 			asoc->size_on_reasm_queue -= chk->send_size;
5195 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5196 
5197 			/* Clear up any stream problem */
5198 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5199 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5200 				/*
5201 				 * We must bump forward this stream's
5202 				 * sequence number if the chunk being
5203 				 * skipped is not unordered. There is a
5204 				 * chance that if the peer does not include
5205 				 * the last fragment in its FWD-TSN we WILL
5206 				 * have a problem here, since we would have
5207 				 * a partial chunk in the queue that may not
5208 				 * be deliverable. Also, if a partial
5209 				 * delivery API has started, the user may
5210 				 * get a partial chunk, with the next read
5211 				 * returning a new chunk... really ugly, but
5212 				 * I see no way around it! Maybe a notify??
5213 				 */
5214 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5215 			}
5216 			if (chk->data) {
5217 				sctp_m_freem(chk->data);
5218 				chk->data = NULL;
5219 			}
5220 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5221 		} else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
5222 			/*
5223 			 * If the stream_seq is > than the purging one, we
5224 			 * are done
5225 			 */
5226 			break;
5227 		}
5228 	}
5229 }
5230 
5231 
5232 void
5233 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5234     struct sctp_forward_tsn_chunk *fwd,
5235     int *abort_flag, struct mbuf *m, int offset)
5236 {
5237 	/* The pr-sctp fwd tsn */
5238 	/*
5239 	 * Here we perform all the data-receiver-side steps for
5240 	 * processing a FwdTSN, as required by the pr-sctp draft.
5241 	 *
5242 	 * Assume we get FwdTSN(x):
5243 	 * 1) update local cumTSN to x
5244 	 * 2) try to further advance cumTSN to x + others we have
5245 	 * 3) examine and update the re-ordering queues on pr-in-streams
5246 	 * 4) clean up the re-assembly queue
5247 	 * 5) send a SACK to report where we are.
5248 	 */
5249 	struct sctp_association *asoc;
5250 	uint32_t new_cum_tsn, gap;
5251 	unsigned int i, fwd_sz, cumack_set_flag, m_size;
5252 	uint32_t str_seq;
5253 	struct sctp_stream_in *strm;
5254 	struct sctp_tmit_chunk *chk, *nchk;
5255 	struct sctp_queued_to_read *ctl, *sv;
5256 
5257 	cumack_set_flag = 0;
5258 	asoc = &stcb->asoc;
5259 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5260 		SCTPDBG(SCTP_DEBUG_INDATA1,
5261 		    "Bad size, fwd-tsn chunk too small\n");
5262 		return;
5263 	}
5264 	m_size = (stcb->asoc.mapping_array_size << 3);
5265 	/*************************************************************/
5266 	/* 1. Here we update local cumTSN and shift the bitmap array */
5267 	/*************************************************************/
5268 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5269 
5270 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5271 		/* Already got there ... */
5272 		return;
5273 	}
5274 	/*
5275 	 * now we know the new TSN is more advanced, let's find the actual
5276 	 * gap
5277 	 */
5278 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5279 	asoc->cumulative_tsn = new_cum_tsn;
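	/*
	 * The gap is the bit offset of new_cum_tsn within the mapping
	 * arrays: e.g. with mapping_array_base_tsn = 1000 and
	 * new_cum_tsn = 1010, gap = 10 and bits 0..10 cover TSNs
	 * 1000..1010. If the gap does not fit in the map at all, the maps
	 * are reset wholesale (after a sanity check against the rwnd);
	 * otherwise every missing TSN up to the gap is marked received in
	 * the non-renegable map below.
	 */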
5280 	if (gap >= m_size) {
5281 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5282 			struct mbuf *oper;
5283 
5284 			/*
5285 			 * out of range (beyond the single-byte chunks the rwnd
5286 			 * we give out could hold). This must be an attacker.
5287 			 */
5288 			*abort_flag = 1;
5289 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5290 			    0, M_DONTWAIT, 1, MT_DATA);
5291 			if (oper) {
5292 				struct sctp_paramhdr *ph;
5293 				uint32_t *ippp;
5294 
5295 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5296 				    (sizeof(uint32_t) * 3);
5297 				ph = mtod(oper, struct sctp_paramhdr *);
5298 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5299 				ph->param_length = htons(SCTP_BUF_LEN(oper));
5300 				ippp = (uint32_t *) (ph + 1);
5301 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5302 				ippp++;
5303 				*ippp = asoc->highest_tsn_inside_map;
5304 				ippp++;
5305 				*ippp = new_cum_tsn;
5306 			}
5307 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5308 			sctp_abort_an_association(stcb->sctp_ep, stcb,
5309 			    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
5310 			return;
5311 		}
5312 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5313 
5314 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5315 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5316 		asoc->highest_tsn_inside_map = new_cum_tsn;
5317 
5318 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5319 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5320 
5321 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5322 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5323 		}
5324 	} else {
5325 		SCTP_TCB_LOCK_ASSERT(stcb);
5326 		for (i = 0; i <= gap; i++) {
5327 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5328 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5329 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5330 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5331 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5332 				}
5333 			}
5334 		}
5335 	}
5336 	/*************************************************************/
5337 	/* 2. Clear up re-assembly queue                             */
5338 	/*************************************************************/
5339 	/*
5340 	 * First service the reassembly queue if a pd-api (partial delivery)
5341 	 * is in progress, just in case we can move it forward.
5342 	 */
5343 	if (asoc->fragmented_delivery_inprogress) {
5344 		sctp_service_reassembly(stcb, asoc);
5345 	}
5346 	/* For each one on here see if we need to toss it */
5347 	/*
5348 	 * For now, large messages held on the reasmqueue that are complete
5349 	 * will be tossed too. In theory we could do more work: spin through
5350 	 * the queue, stop after dumping one message (i.e., on seeing the
5351 	 * start of a new message at the head) and call the delivery
5352 	 * function to see whether it can be delivered. But for now we just
5353 	 * dump everything on the queue.
5354 	 */
5355 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5356 		if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5357 			/* It needs to be tossed */
5358 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5359 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5360 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5361 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5362 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5363 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5364 			}
5365 			asoc->size_on_reasm_queue -= chk->send_size;
5366 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5367 
5368 			/* Clear up any stream problem */
5369 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5370 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5371 				/*
5372 				 * We must bump forward this stream's
5373 				 * sequence number if the chunk being
5374 				 * skipped is not unordered. There is a
5375 				 * chance that if the peer does not include
5376 				 * the last fragment in its FWD-TSN we WILL
5377 				 * have a problem here, since we would have
5378 				 * a partial chunk in the queue that may not
5379 				 * be deliverable. Also, if a partial
5380 				 * delivery API has started, the user may
5381 				 * get a partial chunk, with the next read
5382 				 * returning a new chunk... really ugly, but
5383 				 * I see no way around it! Maybe a notify??
5384 				 */
5385 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5386 			}
5387 			if (chk->data) {
5388 				sctp_m_freem(chk->data);
5389 				chk->data = NULL;
5390 			}
5391 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5392 		} else {
5393 			/*
5394 			 * Ok we have gone beyond the end of the fwd-tsn's
5395 			 * mark.
5396 			 */
5397 			break;
5398 		}
5399 	}
5400 	/*******************************************************/
5401 	/* 3. Update the PR-stream re-ordering queues and fix  */
5402 	/* delivery issues as needed.                          */
5403 	/*******************************************************/
5404 	fwd_sz -= sizeof(*fwd);
5405 	if (m && fwd_sz) {
5406 		/* New method. */
5407 		unsigned int num_str;
5408 		struct sctp_strseq *stseq, strseqbuf;
5409 
5410 		offset += sizeof(*fwd);
5411 
5412 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5413 		num_str = fwd_sz / sizeof(struct sctp_strseq);
5414 		for (i = 0; i < num_str; i++) {
5415 			uint16_t st;
5416 
5417 			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5418 			    sizeof(struct sctp_strseq),
5419 			    (uint8_t *) & strseqbuf);
5420 			offset += sizeof(struct sctp_strseq);
5421 			if (stseq == NULL) {
5422 				break;
5423 			}
5424 			/* Convert */
5425 			st = ntohs(stseq->stream);
5426 			stseq->stream = st;
5427 			st = ntohs(stseq->sequence);
5428 			stseq->sequence = st;
5429 
5430 			/* now process */
5431 
5432 			/*
5433 			 * Ok, we now look for the stream/seq on the read
5434 			 * queue where it is not all delivered. If we find it,
5435 			 * we transmute the read entry into a PDI_ABORTED.
5436 			 */
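			/*
			 * "Transmute" means: mark the read entry as ended
			 * and pdapi_aborted, then queue a
			 * SCTP_PARTIAL_DELIVERY_ABORTED notification whose
			 * payload packs the pair as
			 * (stream << 16) | sequence.
			 */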
5437 			if (stseq->stream >= asoc->streamincnt) {
5438 				/* screwed up streams, stop!  */
5439 				break;
5440 			}
5441 			if ((asoc->str_of_pdapi == stseq->stream) &&
5442 			    (asoc->ssn_of_pdapi == stseq->sequence)) {
5443 				/*
5444 				 * If this is the one we were partially
5445 				 * delivering now then we no longer are.
5446 				 * Note this will change with the reassembly
5447 				 * re-write.
5448 				 */
5449 				asoc->fragmented_delivery_inprogress = 0;
5450 			}
5451 			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5452 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5453 				if ((ctl->sinfo_stream == stseq->stream) &&
5454 				    (ctl->sinfo_ssn == stseq->sequence)) {
5455 					str_seq = (stseq->stream << 16) | stseq->sequence;
5456 					ctl->end_added = 1;
5457 					ctl->pdapi_aborted = 1;
5458 					sv = stcb->asoc.control_pdapi;
5459 					stcb->asoc.control_pdapi = ctl;
5460 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5461 					    stcb,
5462 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5463 					    (void *)&str_seq,
5464 					    SCTP_SO_NOT_LOCKED);
5465 					stcb->asoc.control_pdapi = sv;
5466 					break;
5467 				} else if ((ctl->sinfo_stream == stseq->stream) &&
5468 				    SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5469 					/* We are past our victim SSN */
5470 					break;
5471 				}
5472 			}
5473 			strm = &asoc->strmin[stseq->stream];
5474 			if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5475 				/* Update the sequence number */
5476 				strm->last_sequence_delivered = stseq->sequence;
5477 			}
5478 			/* now kick the stream the new way */
5479 			/* sa_ignore NO_NULL_CHK */
5480 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5481 		}
5482 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5483 	}
5484 	/*
5485 	 * Now slide things forward.
5486 	 */
5487 	sctp_slide_mapping_arrays(stcb);
5488 
5489 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5490 		/* now lets kick out and check for more fragmented delivery */
5491 		/* sa_ignore NO_NULL_CHK */
5492 		sctp_deliver_reasm_check(stcb, &stcb->asoc);
5493 	}
5494 }
5495