xref: /freebsd/sys/netinet/sctp_indata.c (revision 6486b015fc84e96725fef22b0e3363351399ae83)
/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $	 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>


/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it, for bundling).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-to-many socket,
	 * since sb_cc is the count that every association has put up. When
	 * we rewrite sctp_soreceive we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * Take out what has NOT yet been put on the socket queue but that
	 * we still hold for delivery.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));
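	/*
	 * Note: the MSIZE terms above charge each queued chunk one mbuf's
	 * worth of header overhead, so a flood of tiny chunks cannot
	 * inflate the advertised window beyond what we can really buffer.
	 */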

	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnds */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0. SWS engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}



/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
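	/*
	 * The shift places the DATA chunk flags in the high byte of
	 * sinfo_flags; e.g. SCTP_DATA_UNORDERED (0x04) becomes
	 * SCTP_UNORDERED (0x0400).
	 */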
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


/*
 * Build out our readq entry based on the incoming chunk.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}
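	/*
	 * Buffer space is sized with CMSG_SPACE() (object length plus
	 * alignment padding), while each cmsg_len below is set with
	 * CMSG_LEN() (length without trailing padding), per the usual
	 * ancillary data rules.
	 */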
	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->sreinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}


static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one map to the other.
		 */
		return;
	}
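	/*
	 * gap is the bit offset of this TSN from mapping_array_base_tsn;
	 * TSNs use mod-2^32 serial number arithmetic, so the computation
	 * is wrap-safe.
	 */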
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		printf("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
		panic("Things are really messed up now!!");
#endif
	}
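	/*
	 * Move the TSN from the renegable map to the non-renegable (nr)
	 * map: once the data has been handed to the application, the gap
	 * ack covering it can no longer be revoked.
	 */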
	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}


/*
 * We are delivering currently from the reassembly queue. We must continue to
 * deliver until we either: 1) run out of space, 2) run out of sequential
 * TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *nchk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;
	struct sctp_queued_to_read *control, *ctl, *nctl;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket above is long gone or going away */
abandon:
		asoc->fragmented_delivery_inprogress = 0;
		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			/* sa_ignore FREED_MEMORY */
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream
			 * and it is not unordered
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {

			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end,
			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong: either
				 * control_pdapi is NULL, the tail_mbuf
				 * is corrupt, or there is an EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was an EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue, this will probably hose the association\n");

#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* Pull it off; we delivered it. */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now lets see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
				/* Deliver more if we can. */
				if (nxt_todel == ctl->sinfo_ssn) {
					TAILQ_REMOVE(&strm->inqueue, ctl, next);
					asoc->size_on_all_streams -= ctl->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					strm->last_sequence_delivered++;
					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					    ctl,
					    &stcb->sctp_socket->so_rcv, 1,
					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
				} else {
					break;
				}
				nxt_todel = strm->last_sequence_delivered + 1;
			}
			break;
		}
	}
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we hit one that is out of order.
 * One big question still remains: what to do when the socket buffer is FULL?
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 *
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *oper;

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
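	/*
	 * The message is charged to the stream queues up front; the
	 * counters are backed out below if it turns out we can hand it
	 * straight to the socket buffer.
	 */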
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream,
	    (uint32_t) strm->last_sequence_delivered,
	    (uint32_t) nxt_todel);
	if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (sizeof(uint32_t) * 3);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
			ippp++;
			*ippp = control->sinfo_tsn;
			ippp++;
			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

		*abort_flag = 1;
		return;

	}
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it won't be queued if it can be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;

		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_NOT_LOCKED);
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, he sent me a duplicate stream
					 * seq number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSNs have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh, for now just blow
					 * away the chunk!
					 */

					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom) {
						sctp_free_remote_addr(control->whoFrom);
						control->whoFrom = NULL;
					}
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}

/*
 * Returns two things: the total size of the deliverable parts of the
 * first fragmented message on the reassembly queue, and a 1 if all of
 * the message is ready or a 0 if the message is still incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
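	/*
	 * Walk the queue; fragments of a single message carry strictly
	 * consecutive TSNs, so any gap means the remainder of the message
	 * has not arrived yet.
	 */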
	TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
	}
	return (0);
}

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize, pd_point;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep the first one is here and it's ok to deliver,
			 * but should we?
			 */
			if (stcb->sctp_socket) {
				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
				    stcb->sctp_ep->partial_delivery_point);
			} else {
				pd_point = stcb->sctp_ep->partial_delivery_point;
			}
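			/*
			 * pd_point is the partial delivery threshold: start
			 * handing the message up once all of it is here or
			 * at least pd_point bytes of it are deliverable.
			 */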
			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {

				/*
				 * Yes, we set up to start reception by
				 * backing down the TSN, just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery, but it won't know to go
		 * back and call itself again. We do that here with the
		 * doit_again label.
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our fragmented delivery, could be more
			 * waiting?
			 */
			goto doit_again;
		}
	}
}

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so pull it off (or as much as
 * we can). If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	uint32_t cum_ackp1, prev_tsn, post_tsn;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery in progress;
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);

				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (sizeof(uint32_t) * 3);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment, NOT a FIRST
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (3 * sizeof(uint32_t));
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
					    chk->rec.data.stream_number,
					    asoc->str_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (sizeof(uint32_t) * 3);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
					    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
					    chk->rec.data.stream_seq,
					    asoc->ssn_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, he sent me a duplicate TSN */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSNs have wrapped. Maybe I should
			 * compare to TSN somehow... sigh, for now just blow
			 * away the chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			return;
		} else {
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    prev->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct SSN here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    prev->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_MIDDLE_FRAG ||
				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST, NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    next->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct SSN here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    next->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is not
 * stuffing us full of bad packet fragments. A broken peer could also do
 * this, but that is doubtful. It is too bad I must worry about evil
 * crackers; sigh :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok this guy belongs next to a guy
					 * that is NOT last; it should be a
					 * middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since it's a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than the new chunk, but does
			 * the new chunk need to be right before it?
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, it better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}


static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
	/* Process a data chunk */
	struct sctp_tmit_chunk *chk;
	uint32_t tsn, gap;
	struct mbuf *dmbuf;
	int the_len;
	int need_reasm_check = 0;
	uint16_t strmno, strmseq;
	struct mbuf *oper;
	struct sctp_queued_to_read *control;
	int ordered;
	uint32_t protocol_id;
	uint8_t chunk_flags;
	struct sctp_stream_reset_list *liste;

	chk = NULL;
	tsn = ntohl(ch->dp.tsn);
	chunk_flags = ch->ch.chunk_flags;
	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
		asoc->send_sack = 1;
	}
	protocol_id = ch->dp.protocol_id;
	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
	}
	if (stcb == NULL) {
		return (0);
	}
	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
		/* It is a duplicate */
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/* Calculate the number of TSNs between the base and this TSN */
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
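	/*
	 * The mapping array is a bitmask, so its capacity in TSNs is its
	 * size in bytes times 8 (hence the << 3 below).
	 */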
	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
		/* Can't hold the bit in the mapping at max array, toss it */
		return (0);
	}
	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_expand_mapping_array(asoc, gap)) {
			/* Can't expand, drop it */
			return (0);
		}
	}
	if (SCTP_TSN_GT(tsn, *high_tsn)) {
		*high_tsn = tsn;
	}
	/* See if we have received this one already */
	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/*
	 * Check to see about the GONE flag, duplicates would cause a sack
	 * to be sent up above
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
	    ) {
		/*
		 * wait a minute, this guy is gone, there is no longer a
		 * receiver. Send peer an ABORT!
		 */
		struct mbuf *op_err;

		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
	/*
	 * Now before going further we see if there is room. If NOT then we
	 * MAY let one through only IF this TSN is the one we are waiting
	 * for on a partial delivery API.
	 */

	/* now do the tests */
	if (((asoc->cnt_on_all_streams +
	    asoc->cnt_on_reasm_queue +
	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
	    (((int)asoc->my_rwnd) <= 0)) {
1549 		/*
1550 		 * When we have NO room in the rwnd we check to make sure
1551 		 * the reader is doing its job...
1552 		 */
1553 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1554 			/* some to read, wake-up */
1555 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1556 			struct socket *so;
1557 
1558 			so = SCTP_INP_SO(stcb->sctp_ep);
1559 			atomic_add_int(&stcb->asoc.refcnt, 1);
1560 			SCTP_TCB_UNLOCK(stcb);
1561 			SCTP_SOCKET_LOCK(so, 1);
1562 			SCTP_TCB_LOCK(stcb);
1563 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1564 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1565 				/* assoc was freed while we were unlocked */
1566 				SCTP_SOCKET_UNLOCK(so, 1);
1567 				return (0);
1568 			}
1569 #endif
1570 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1571 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1572 			SCTP_SOCKET_UNLOCK(so, 1);
1573 #endif
1574 		}
1575 		/* now is it in the mapping array of what we have accepted? */
1576 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1577 		    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1578 			/* Nope, not in the valid range, dump it */
1579 			sctp_set_rwnd(stcb, asoc);
1580 			if ((asoc->cnt_on_all_streams +
1581 			    asoc->cnt_on_reasm_queue +
1582 			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1583 				SCTP_STAT_INCR(sctps_datadropchklmt);
1584 			} else {
1585 				SCTP_STAT_INCR(sctps_datadroprwnd);
1586 			}
1587 			*break_flag = 1;
1588 			return (0);
1589 		}
1590 	}
1591 	strmno = ntohs(ch->dp.stream_id);
1592 	if (strmno >= asoc->streamincnt) {
1593 		struct sctp_paramhdr *phdr;
1594 		struct mbuf *mb;
1595 
1596 		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1597 		    0, M_DONTWAIT, 1, MT_DATA);
1598 		if (mb != NULL) {
1599 			/* add some space up front so prepend will work well */
1600 			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1601 			phdr = mtod(mb, struct sctp_paramhdr *);
1602 			/*
1603 			 * Error causes are just params, and this one has
1604 			 * two back-to-back phdrs: one with the error type
1605 			 * and size, the other with the stream id and a rsvd
1606 			 */
1607 			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1608 			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1609 			phdr->param_length =
1610 			    htons(sizeof(struct sctp_paramhdr) * 2);
1611 			phdr++;
1612 			/* We insert the stream id in the type field */
1613 			phdr->param_type = ch->dp.stream_id;
1614 			/* And set the length to 0 for the rsvd field */
1615 			phdr->param_length = 0;
1616 			sctp_queue_op_err(stcb, mb);
1617 		}
1618 		SCTP_STAT_INCR(sctps_badsid);
1619 		SCTP_TCB_LOCK_ASSERT(stcb);
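		/*
		 * Even though the stream id was invalid and the data is
		 * dropped, mark the TSN as received so the cum-ack can
		 * advance and the peer does not retransmit this chunk
		 * forever.
		 */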
1620 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1621 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1622 			asoc->highest_tsn_inside_nr_map = tsn;
1623 		}
1624 		if (tsn == (asoc->cumulative_tsn + 1)) {
1625 			/* Update cum-ack */
1626 			asoc->cumulative_tsn = tsn;
1627 		}
1628 		return (0);
1629 	}
1630 	/*
1631 	 * Before we continue, let's validate that we are not being fooled by
1632 	 * an evil attacker. We can only have 4k chunks based on the TSN
1633 	 * spread allowed by the mapping array (512 * 8 bits), so there is no
1634 	 * way our stream sequence numbers could have wrapped. We of course
1635 	 * only validate the FIRST fragment, so the bit must be set.
1636 	 */
1637 	strmseq = ntohs(ch->dp.stream_sequence);
1638 #ifdef SCTP_ASOCLOG_OF_TSNS
1639 	SCTP_TCB_LOCK_ASSERT(stcb);
1640 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1641 		asoc->tsn_in_at = 0;
1642 		asoc->tsn_in_wrapped = 1;
1643 	}
1644 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1645 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1646 	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1647 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1648 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1649 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1650 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1651 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1652 	asoc->tsn_in_at++;
1653 #endif
1654 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1655 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1656 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1657 	    SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1658 		/* The incoming sseq is behind where we last delivered? */
1659 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1660 		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1661 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1662 		    0, M_DONTWAIT, 1, MT_DATA);
1663 		if (oper) {
1664 			struct sctp_paramhdr *ph;
1665 			uint32_t *ippp;
1666 
1667 			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1668 			    (3 * sizeof(uint32_t));
1669 			ph = mtod(oper, struct sctp_paramhdr *);
1670 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1671 			ph->param_length = htons(SCTP_BUF_LEN(oper));
1672 			ippp = (uint32_t *) (ph + 1);
1673 			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1674 			ippp++;
1675 			*ippp = tsn;
1676 			ippp++;
1677 			*ippp = ((strmno << 16) | strmseq);
1678 
1679 		}
1680 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1681 		sctp_abort_an_association(stcb->sctp_ep, stcb,
1682 		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1683 		*abort_flag = 1;
1684 		return (0);
1685 	}
1686 	/************************************
1687 	 * From here down we may find ch-> invalid,
1688 	 * so it's a good idea NOT to use it.
1689 	 *************************************/
1690 
1691 	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1692 	if (last_chunk == 0) {
1693 		dmbuf = SCTP_M_COPYM(*m,
1694 		    (offset + sizeof(struct sctp_data_chunk)),
1695 		    the_len, M_DONTWAIT);
1696 #ifdef SCTP_MBUF_LOGGING
1697 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1698 			struct mbuf *mat;
1699 
1700 			for (mat = dmbuf; mat; mat = SCTP_BUF_NEXT(mat)) {
1701 				if (SCTP_BUF_IS_EXTENDED(mat)) {
1702 					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1703 				}
1704 			}
1705 		}
1706 #endif
1707 	} else {
1708 		/* We can steal the last chunk */
1709 		int l_len;
1710 
1711 		dmbuf = *m;
1712 		/* lop off the top part */
1713 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1714 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1715 			l_len = SCTP_BUF_LEN(dmbuf);
1716 		} else {
1717 			/*
1718 			 * need to count up the size; hopefully we do not
1719 			 * hit this too often :-0
1720 			 */
1721 			struct mbuf *lat;
1722 
1723 			l_len = 0;
1724 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1725 				l_len += SCTP_BUF_LEN(lat);
1726 			}
1727 		}
1728 		if (l_len > the_len) {
1729 			/* Trim the rounding bytes off the end too */
1730 			m_adj(dmbuf, -(l_len - the_len));
1731 		}
1732 	}
1733 	if (dmbuf == NULL) {
1734 		SCTP_STAT_INCR(sctps_nomem);
1735 		return (0);
1736 	}
1737 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1738 	    asoc->fragmented_delivery_inprogress == 0 &&
1739 	    TAILQ_EMPTY(&asoc->resetHead) &&
1740 	    ((ordered == 0) ||
1741 	    ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1742 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1743 		/* Candidate for express delivery */
1744 		/*
1745 		 * It's not fragmented, no PD-API is up, nothing is in the
1746 		 * delivery queue, it's un-ordered OR ordered and the next to
1747 		 * deliver AND nothing else is stuck on the stream queue,
1748 		 * and there is room for it in the socket buffer. Let's just
1749 		 * stuff it up the buffer....
1750 		 */
1751 
1752 		/* It would be nice to avoid this copy if we could :< */
1753 		sctp_alloc_a_readq(stcb, control);
1754 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1755 		    protocol_id,
1756 		    stcb->asoc.context,
1757 		    strmno, strmseq,
1758 		    chunk_flags,
1759 		    dmbuf);
1760 		if (control == NULL) {
1761 			goto failed_express_del;
1762 		}
1763 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1764 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1765 			asoc->highest_tsn_inside_nr_map = tsn;
1766 		}
1767 		sctp_add_to_readq(stcb->sctp_ep, stcb,
1768 		    control, &stcb->sctp_socket->so_rcv,
1769 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1770 
1771 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1772 			/* for ordered, bump what we delivered */
1773 			asoc->strmin[strmno].last_sequence_delivered++;
1774 		}
1775 		SCTP_STAT_INCR(sctps_recvexpress);
1776 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1777 			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1778 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1779 		}
1780 		control = NULL;
1781 
1782 		goto finish_express_del;
1783 	}
1784 failed_express_del:
1785 	/* If we reach here this is a new chunk */
1786 	chk = NULL;
1787 	control = NULL;
1788 	/* Express for fragmented delivery? */
1789 	if ((asoc->fragmented_delivery_inprogress) &&
1790 	    (stcb->asoc.control_pdapi) &&
1791 	    (asoc->str_of_pdapi == strmno) &&
1792 	    (asoc->ssn_of_pdapi == strmseq)
1793 	    ) {
1794 		control = stcb->asoc.control_pdapi;
1795 		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1796 			/* Can't be another first? */
1797 			goto failed_pdapi_express_del;
1798 		}
1799 		if (tsn == (control->sinfo_tsn + 1)) {
1800 			/* Yep, we can add it on */
1801 			int end = 0;
1802 
1803 			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1804 				end = 1;
1805 			}
1806 			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1807 			    tsn,
1808 			    &stcb->sctp_socket->so_rcv)) {
1809 				SCTP_PRINTF("Append fails end:%d\n", end);
1810 				goto failed_pdapi_express_del;
1811 			}
1812 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1813 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1814 				asoc->highest_tsn_inside_nr_map = tsn;
1815 			}
1816 			SCTP_STAT_INCR(sctps_recvexpressm);
1817 			control->sinfo_tsn = tsn;
1818 			asoc->tsn_last_delivered = tsn;
1819 			asoc->fragment_flags = chunk_flags;
1820 			asoc->tsn_of_pdapi_last_delivered = tsn;
1821 			asoc->last_flags_delivered = chunk_flags;
1822 			asoc->last_strm_seq_delivered = strmseq;
1823 			asoc->last_strm_no_delivered = strmno;
1824 			if (end) {
1825 				/* clean up the flags and such */
1826 				asoc->fragmented_delivery_inprogress = 0;
1827 				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1828 					asoc->strmin[strmno].last_sequence_delivered++;
1829 				}
1830 				stcb->asoc.control_pdapi = NULL;
1831 				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1832 					/*
1833 					 * There could be another message
1834 					 * ready
1835 					 */
1836 					need_reasm_check = 1;
1837 				}
1838 			}
1839 			control = NULL;
1840 			goto finish_express_del;
1841 		}
1842 	}
1843 failed_pdapi_express_del:
1844 	control = NULL;
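	/*
	 * Which map this TSN lands in decides renegability: with
	 * sctp_do_drain off we never renege, so the TSN goes in the
	 * non-renegable (nr) map; otherwise it sits in the regular map and
	 * may later be reneged under memory pressure.
	 */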
1845 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1846 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1847 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1848 			asoc->highest_tsn_inside_nr_map = tsn;
1849 		}
1850 	} else {
1851 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1852 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1853 			asoc->highest_tsn_inside_map = tsn;
1854 		}
1855 	}
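	/*
	 * Fragmented data is wrapped in a sctp_tmit_chunk bound for the
	 * reassembly queue; a complete message is wrapped in a
	 * sctp_queued_to_read entry and heads for the read queue instead.
	 */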
1856 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1857 		sctp_alloc_a_chunk(stcb, chk);
1858 		if (chk == NULL) {
1859 			/* No memory so we drop the chunk */
1860 			SCTP_STAT_INCR(sctps_nomem);
1861 			if (last_chunk == 0) {
1862 				/* we copied it, free the copy */
1863 				sctp_m_freem(dmbuf);
1864 			}
1865 			return (0);
1866 		}
1867 		chk->rec.data.TSN_seq = tsn;
1868 		chk->no_fr_allowed = 0;
1869 		chk->rec.data.stream_seq = strmseq;
1870 		chk->rec.data.stream_number = strmno;
1871 		chk->rec.data.payloadtype = protocol_id;
1872 		chk->rec.data.context = stcb->asoc.context;
1873 		chk->rec.data.doing_fast_retransmit = 0;
1874 		chk->rec.data.rcv_flags = chunk_flags;
1875 		chk->asoc = asoc;
1876 		chk->send_size = the_len;
1877 		chk->whoTo = net;
1878 		atomic_add_int(&net->ref_count, 1);
1879 		chk->data = dmbuf;
1880 	} else {
1881 		sctp_alloc_a_readq(stcb, control);
1882 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1883 		    protocol_id,
1884 		    stcb->asoc.context,
1885 		    strmno, strmseq,
1886 		    chunk_flags,
1887 		    dmbuf);
1888 		if (control == NULL) {
1889 			/* No memory so we drop the chunk */
1890 			SCTP_STAT_INCR(sctps_nomem);
1891 			if (last_chunk == 0) {
1892 				/* we copied it, free the copy */
1893 				sctp_m_freem(dmbuf);
1894 			}
1895 			return (0);
1896 		}
1897 		control->length = the_len;
1898 	}
1899 
1900 	/* Mark it as received */
1901 	/* Now queue it where it belongs */
1902 	if (control != NULL) {
1903 		/* First a sanity check */
1904 		if (asoc->fragmented_delivery_inprogress) {
1905 			/*
1906 			 * Ok, we have a fragmented delivery in progress; if
1907 			 * this chunk is next to deliver OR belongs, in our
1908 			 * view, to the reassembly, the peer is evil or
1909 			 * broken.
1910 			 */
1911 			uint32_t estimate_tsn;
1912 
1913 			estimate_tsn = asoc->tsn_last_delivered + 1;
1914 			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1915 			    (estimate_tsn == control->sinfo_tsn)) {
1916 				/* Evil/Broken peer */
1917 				sctp_m_freem(control->data);
1918 				control->data = NULL;
1919 				if (control->whoFrom) {
1920 					sctp_free_remote_addr(control->whoFrom);
1921 					control->whoFrom = NULL;
1922 				}
1923 				sctp_free_a_readq(stcb, control);
1924 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1925 				    0, M_DONTWAIT, 1, MT_DATA);
1926 				if (oper) {
1927 					struct sctp_paramhdr *ph;
1928 					uint32_t *ippp;
1929 
1930 					SCTP_BUF_LEN(oper) =
1931 					    sizeof(struct sctp_paramhdr) +
1932 					    (3 * sizeof(uint32_t));
1933 					ph = mtod(oper, struct sctp_paramhdr *);
1934 					ph->param_type =
1935 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1936 					ph->param_length = htons(SCTP_BUF_LEN(oper));
1937 					ippp = (uint32_t *) (ph + 1);
1938 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1939 					ippp++;
1940 					*ippp = tsn;
1941 					ippp++;
1942 					*ippp = ((strmno << 16) | strmseq);
1943 				}
1944 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1945 				sctp_abort_an_association(stcb->sctp_ep, stcb,
1946 				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1947 
1948 				*abort_flag = 1;
1949 				return (0);
1950 			} else {
1951 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1952 					sctp_m_freem(control->data);
1953 					control->data = NULL;
1954 					if (control->whoFrom) {
1955 						sctp_free_remote_addr(control->whoFrom);
1956 						control->whoFrom = NULL;
1957 					}
1958 					sctp_free_a_readq(stcb, control);
1959 
1960 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1961 					    0, M_DONTWAIT, 1, MT_DATA);
1962 					if (oper) {
1963 						struct sctp_paramhdr *ph;
1964 						uint32_t *ippp;
1965 
1966 						SCTP_BUF_LEN(oper) =
1967 						    sizeof(struct sctp_paramhdr) +
1968 						    (3 * sizeof(uint32_t));
1969 						ph = mtod(oper,
1970 						    struct sctp_paramhdr *);
1971 						ph->param_type =
1972 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1973 						ph->param_length =
1974 						    htons(SCTP_BUF_LEN(oper));
1975 						ippp = (uint32_t *) (ph + 1);
1976 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1977 						ippp++;
1978 						*ippp = tsn;
1979 						ippp++;
1980 						*ippp = ((strmno << 16) | strmseq);
1981 					}
1982 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1983 					sctp_abort_an_association(stcb->sctp_ep,
1984 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1985 
1986 					*abort_flag = 1;
1987 					return (0);
1988 				}
1989 			}
1990 		} else {
1991 			/* No PDAPI running */
1992 			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1993 				/*
1994 				 * Reassembly queue is NOT empty; validate
1995 				 * that this tsn does not need to be in the
1996 				 * reassembly queue. If it does, then our peer
1997 				 * is broken or evil.
1998 				 */
1999 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2000 					sctp_m_freem(control->data);
2001 					control->data = NULL;
2002 					if (control->whoFrom) {
2003 						sctp_free_remote_addr(control->whoFrom);
2004 						control->whoFrom = NULL;
2005 					}
2006 					sctp_free_a_readq(stcb, control);
2007 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2008 					    0, M_DONTWAIT, 1, MT_DATA);
2009 					if (oper) {
2010 						struct sctp_paramhdr *ph;
2011 						uint32_t *ippp;
2012 
2013 						SCTP_BUF_LEN(oper) =
2014 						    sizeof(struct sctp_paramhdr) +
2015 						    (3 * sizeof(uint32_t));
2016 						ph = mtod(oper,
2017 						    struct sctp_paramhdr *);
2018 						ph->param_type =
2019 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2020 						ph->param_length =
2021 						    htons(SCTP_BUF_LEN(oper));
2022 						ippp = (uint32_t *) (ph + 1);
2023 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2024 						ippp++;
2025 						*ippp = tsn;
2026 						ippp++;
2027 						*ippp = ((strmno << 16) | strmseq);
2028 					}
2029 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2030 					sctp_abort_an_association(stcb->sctp_ep,
2031 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2032 
2033 					*abort_flag = 1;
2034 					return (0);
2035 				}
2036 			}
2037 		}
2038 		/* ok, if we reach here we have passed the sanity checks */
2039 		if (chunk_flags & SCTP_DATA_UNORDERED) {
2040 			/* queue directly into socket buffer */
2041 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2042 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2043 			    control,
2044 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2045 		} else {
2046 			/*
2047 			 * Special check for when streams are resetting. We
2048 			 * could be smarter about this and check the actual
2049 			 * stream to see if it is not being reset... that
2050 			 * way we would not create head-of-line blocking
2051 			 * (HOLB) amongst streams being reset and those not.
2052 			 *
2053 			 * We take complete messages that have a stream reset
2054 			 * intervening (aka the TSN is after where our
2055 			 * cum-ack needs to be) off and put them on the
2056 			 * pending_reply_queue. The reassembly ones we do
2057 			 * not have to worry about since they are all sorted
2058 			 * and processed in TSN order. It is only the
2059 			 * singletons I must worry about.
2060 			 */
2061 			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2062 			    SCTP_TSN_GT(tsn, liste->tsn)) {
2063 				/*
2064 				 * yep, it's past where we need to reset... go
2065 				 * ahead and queue it.
2066 				 */
2067 				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2068 					/* first one on */
2069 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2070 				} else {
2071 					struct sctp_queued_to_read *ctlOn,
2072 					                   *nctlOn;
2073 					unsigned char inserted = 0;
2074 
2075 					TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2076 						if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2077 							continue;
2078 						} else {
2079 							/* found it */
2080 							TAILQ_INSERT_BEFORE(ctlOn, control, next);
2081 							inserted = 1;
2082 							break;
2083 						}
2084 					}
2085 					if (inserted == 0) {
2086 						/*
2087 						 * must be put at the end;
2088 						 * everything queued has a
2089 						 * smaller TSN.
2090 						 */
2091 						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2092 					}
2093 				}
2094 			} else {
2095 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2096 				if (*abort_flag) {
2097 					return (0);
2098 				}
2099 			}
2100 		}
2101 	} else {
2102 		/* Into the re-assembly queue */
2103 		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2104 		if (*abort_flag) {
2105 			/*
2106 			 * the assoc is now gone and chk was put onto the
2107 			 * reasm queue, which has all been freed.
2108 			 */
2109 			*m = NULL;
2110 			return (0);
2111 		}
2112 	}
2113 finish_express_del:
2114 	if (tsn == (asoc->cumulative_tsn + 1)) {
2115 		/* Update cum-ack */
2116 		asoc->cumulative_tsn = tsn;
2117 	}
2118 	if (last_chunk) {
2119 		*m = NULL;
2120 	}
2121 	if (ordered) {
2122 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2123 	} else {
2124 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2125 	}
2126 	SCTP_STAT_INCR(sctps_recvdata);
2127 	/* Set it present please */
2128 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2129 		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2130 	}
2131 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2132 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2133 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2134 	}
2135 	/* check the special flag for stream resets */
2136 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2137 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2138 		/*
2139 		 * we have finished working through the backlogged TSNs; now
2140 		 * it is time to reset streams. 1: call the reset function. 2:
2141 		 * free the pending_reply space. 3: distribute any chunks in
2142 		 * the pending_reply_queue.
2143 		 */
2144 		struct sctp_queued_to_read *ctl, *nctl;
2145 
2146 		sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2147 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2148 		SCTP_FREE(liste, SCTP_M_STRESET);
2149 		/* sa_ignore FREED_MEMORY */
2150 		liste = TAILQ_FIRST(&asoc->resetHead);
2151 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2152 			/* All can be removed */
2153 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2154 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2155 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2156 				if (*abort_flag) {
2157 					return (0);
2158 				}
2159 			}
2160 		} else {
2161 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2162 				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2163 					break;
2164 				}
2165 				/*
2166 				 * if ctl->sinfo_tsn is <= liste->tsn we can
2167 				 * process it which is the NOT of
2168 				 * ctl->sinfo_tsn > liste->tsn
2169 				 */
2170 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2171 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2172 				if (*abort_flag) {
2173 					return (0);
2174 				}
2175 			}
2176 		}
2177 		/*
2178 		 * Now service re-assembly to pick up anything that has been
2179 		 * held on the reassembly queue.
2180 		 */
2181 		sctp_deliver_reasm_check(stcb, asoc);
2182 		need_reasm_check = 0;
2183 	}
2184 	if (need_reasm_check) {
2185 		/* Another one waits ? */
2186 		sctp_deliver_reasm_check(stcb, asoc);
2187 	}
2188 	return (1);
2189 }
2190 
2191 int8_t sctp_map_lookup_tab[256] = {
2192 	0, 1, 0, 2, 0, 1, 0, 3,
2193 	0, 1, 0, 2, 0, 1, 0, 4,
2194 	0, 1, 0, 2, 0, 1, 0, 3,
2195 	0, 1, 0, 2, 0, 1, 0, 5,
2196 	0, 1, 0, 2, 0, 1, 0, 3,
2197 	0, 1, 0, 2, 0, 1, 0, 4,
2198 	0, 1, 0, 2, 0, 1, 0, 3,
2199 	0, 1, 0, 2, 0, 1, 0, 6,
2200 	0, 1, 0, 2, 0, 1, 0, 3,
2201 	0, 1, 0, 2, 0, 1, 0, 4,
2202 	0, 1, 0, 2, 0, 1, 0, 3,
2203 	0, 1, 0, 2, 0, 1, 0, 5,
2204 	0, 1, 0, 2, 0, 1, 0, 3,
2205 	0, 1, 0, 2, 0, 1, 0, 4,
2206 	0, 1, 0, 2, 0, 1, 0, 3,
2207 	0, 1, 0, 2, 0, 1, 0, 7,
2208 	0, 1, 0, 2, 0, 1, 0, 3,
2209 	0, 1, 0, 2, 0, 1, 0, 4,
2210 	0, 1, 0, 2, 0, 1, 0, 3,
2211 	0, 1, 0, 2, 0, 1, 0, 5,
2212 	0, 1, 0, 2, 0, 1, 0, 3,
2213 	0, 1, 0, 2, 0, 1, 0, 4,
2214 	0, 1, 0, 2, 0, 1, 0, 3,
2215 	0, 1, 0, 2, 0, 1, 0, 6,
2216 	0, 1, 0, 2, 0, 1, 0, 3,
2217 	0, 1, 0, 2, 0, 1, 0, 4,
2218 	0, 1, 0, 2, 0, 1, 0, 3,
2219 	0, 1, 0, 2, 0, 1, 0, 5,
2220 	0, 1, 0, 2, 0, 1, 0, 3,
2221 	0, 1, 0, 2, 0, 1, 0, 4,
2222 	0, 1, 0, 2, 0, 1, 0, 3,
2223 	0, 1, 0, 2, 0, 1, 0, 8
2224 };
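/*
 * sctp_map_lookup_tab[val] is the number of consecutive 1-bits in val
 * counting from the least significant bit, i.e. how many in-sequence TSNs
 * a mapping-array byte contributes before its first hole. For example,
 * val = 0x5f = 01011111b has five low-order 1-bits, so
 * sctp_map_lookup_tab[0x5f] == 5.
 */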
2225 
2226 
2227 void
2228 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2229 {
2230 	/*
2231 	 * Now we also need to check the mapping array in a couple of ways.
2232 	 * 1) Did we move the cum-ack point?
2233 	 *
2234 	 * When you first glance at this you might think that all entries that
2235 	 * make up the position of the cum-ack would be in the nr-mapping
2236 	 * array only... i.e. things up to the cum-ack are always
2237 	 * deliverable. That's true with one exception: when it's a fragmented
2238 	 * message we may not deliver the data until some threshold (or all
2239 	 * of it) is in place. So we must OR the nr_mapping_array and
2240 	 * mapping_array to get a true picture of the cum-ack.
2241 	 */
2242 	struct sctp_association *asoc;
2243 	int at;
2244 	uint8_t val;
2245 	int slide_from, slide_end, lgap, distance;
2246 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2247 
2248 	asoc = &stcb->asoc;
2249 
2250 	old_cumack = asoc->cumulative_tsn;
2251 	old_base = asoc->mapping_array_base_tsn;
2252 	old_highest = asoc->highest_tsn_inside_map;
2253 	/*
2254 	 * We could probably improve this a small bit by calculating the
2255 	 * offset of the current cum-ack as the starting point.
2256 	 */
2257 	at = 0;
2258 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2259 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2260 		if (val == 0xff) {
2261 			at += 8;
2262 		} else {
2263 			/* there is a 0 bit */
2264 			at += sctp_map_lookup_tab[val];
2265 			break;
2266 		}
2267 	}
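	/*
	 * 'at' now counts the TSNs present consecutively from the array
	 * base, so the new cum-ack is base + (at - 1); if at is 0 this
	 * leaves the cum-ack just below the base, i.e. nothing new acked.
	 */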
2268 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2269 
2270 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2271 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2272 #ifdef INVARIANTS
2273 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2274 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2275 #else
2276 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2277 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2278 		sctp_print_mapping_array(asoc);
2279 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2280 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2281 		}
2282 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2283 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2284 #endif
2285 	}
2286 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2287 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2288 	} else {
2289 		highest_tsn = asoc->highest_tsn_inside_map;
2290 	}
2291 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2292 		/* The complete array was completed by a single FR */
2293 		/* highest becomes the cum-ack */
2294 		int clr;
2295 
2296 #ifdef INVARIANTS
2297 		unsigned int i;
2298 
2299 #endif
2300 
2301 		/* clear the array */
2302 		clr = ((at + 7) >> 3);
2303 		if (clr > asoc->mapping_array_size) {
2304 			clr = asoc->mapping_array_size;
2305 		}
2306 		memset(asoc->mapping_array, 0, clr);
2307 		memset(asoc->nr_mapping_array, 0, clr);
2308 #ifdef INVARIANTS
2309 		for (i = 0; i < asoc->mapping_array_size; i++) {
2310 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2311 				printf("Error: mapping arrays not clean at clear\n");
2312 				sctp_print_mapping_array(asoc);
2313 			}
2314 		}
2315 #endif
2316 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2317 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2318 	} else if (at >= 8) {
2319 		/* we can slide the mapping array down */
2320 		/* slide_from holds where we hit the first NON 0xff byte */
2321 
2322 		/*
2323 		 * now calculate the ceiling of the move using our highest
2324 		 * TSN value
2325 		 */
2326 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2327 		slide_end = (lgap >> 3);
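		/*
		 * lgap is the bit offset of the highest live TSN, so
		 * slide_end (lgap >> 3) is the last mapping-array byte we
		 * must keep; every byte before slide_from is all ones and
		 * can be slid away.
		 */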
2328 		if (slide_end < slide_from) {
2329 			sctp_print_mapping_array(asoc);
2330 #ifdef INVARIANTS
2331 			panic("impossible slide");
2332 #else
2333 			printf("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2334 			    lgap, slide_end, slide_from, at);
2335 			return;
2336 #endif
2337 		}
2338 		if (slide_end > asoc->mapping_array_size) {
2339 #ifdef INVARIANTS
2340 			panic("would overrun buffer");
2341 #else
2342 			printf("Gak, would have overrun map end:%d slide_end:%d\n",
2343 			    asoc->mapping_array_size, slide_end);
2344 			slide_end = asoc->mapping_array_size;
2345 #endif
2346 		}
2347 		distance = (slide_end - slide_from) + 1;
2348 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2349 			sctp_log_map(old_base, old_cumack, old_highest,
2350 			    SCTP_MAP_PREPARE_SLIDE);
2351 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2352 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2353 		}
2354 		if (distance + slide_from > asoc->mapping_array_size ||
2355 		    distance < 0) {
2356 			/*
2357 			 * Here we do NOT slide forward the array so that
2358 			 * hopefully when more data comes in to fill it up
2359 			 * we will be able to slide it forward. Really I
2360 			 * don't think this should happen :-0
2361 			 */
2362 
2363 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2364 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2365 				    (uint32_t) asoc->mapping_array_size,
2366 				    SCTP_MAP_SLIDE_NONE);
2367 			}
2368 		} else {
2369 			int ii;
2370 
2371 			for (ii = 0; ii < distance; ii++) {
2372 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2373 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2374 
2375 			}
2376 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2377 				asoc->mapping_array[ii] = 0;
2378 				asoc->nr_mapping_array[ii] = 0;
2379 			}
2380 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2381 				asoc->highest_tsn_inside_map += (slide_from << 3);
2382 			}
2383 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2384 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2385 			}
2386 			asoc->mapping_array_base_tsn += (slide_from << 3);
2387 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2388 				sctp_log_map(asoc->mapping_array_base_tsn,
2389 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2390 				    SCTP_MAP_SLIDE_RESULT);
2391 			}
2392 		}
2393 	}
2394 }
2395 
2396 void
2397 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2398 {
2399 	struct sctp_association *asoc;
2400 	uint32_t highest_tsn;
2401 
2402 	asoc = &stcb->asoc;
2403 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2404 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2405 	} else {
2406 		highest_tsn = asoc->highest_tsn_inside_map;
2407 	}
2408 
2409 	/*
2410 	 * Now we need to see if we need to queue a sack or just start the
2411 	 * timer (if allowed).
2412 	 */
2413 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2414 		/*
2415 		 * Ok, special case: in the SHUTDOWN-SENT state, here we make
2416 		 * sure the SACK timer is off and instead send a SHUTDOWN and
2417 		 * a SACK
2418 		 */
2419 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2420 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2421 			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2422 		}
2423 		sctp_send_shutdown(stcb,
2424 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2425 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2426 	} else {
2427 		int is_a_gap;
2428 
2429 		/* is there a gap now ? */
2430 		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2431 
2432 		/*
2433 		 * CMT DAC algorithm: increase number of packets received
2434 		 * since last ack
2435 		 */
2436 		stcb->asoc.cmt_dac_pkts_rcvd++;
2437 
2438 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2439 							 * SACK */
2440 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2441 							 * longer is one */
2442 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2443 		    (is_a_gap) ||	/* is still a gap */
2444 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2445 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2446 		    ) {
2447 
2448 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2449 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2450 			    (stcb->asoc.send_sack == 0) &&
2451 			    (stcb->asoc.numduptsns == 0) &&
2452 			    (stcb->asoc.delayed_ack) &&
2453 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2454 
2455 				/*
2456 				 * CMT DAC algorithm: with CMT, delay acks
2457 				 * even in the face of reordering.
2458 				 * Therefore, acks that do not have to be
2459 				 * sent for the above reasons will be
2460 				 * delayed. That is, acks that would have
2461 				 * been sent due to gap reports will be
2462 				 * delayed with DAC. Start the delayed ack
2463 				 * timer.
2465 				 */
2466 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2467 				    stcb->sctp_ep, stcb, NULL);
2468 			} else {
2469 				/*
2470 				 * Ok we must build a SACK since the timer
2471 				 * is pending, we got our first packet OR
2472 				 * there are gaps or duplicates.
2473 				 */
2474 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2475 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2476 			}
2477 		} else {
2478 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2479 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2480 				    stcb->sctp_ep, stcb, NULL);
2481 			}
2482 		}
2483 	}
2484 }
2485 
2486 void
2487 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2488 {
2489 	struct sctp_tmit_chunk *chk;
2490 	uint32_t tsize, pd_point;
2491 	uint16_t nxt_todel;
2492 
2493 	if (asoc->fragmented_delivery_inprogress) {
2494 		sctp_service_reassembly(stcb, asoc);
2495 	}
2496 	/* Can we proceed further, i.e. the PD-API is complete */
2497 	if (asoc->fragmented_delivery_inprogress) {
2498 		/* no */
2499 		return;
2500 	}
2501 	/*
2502 	 * Now is there some other chunk I can deliver from the reassembly
2503 	 * queue?
2504 	 */
2505 doit_again:
2506 	chk = TAILQ_FIRST(&asoc->reasmqueue);
2507 	if (chk == NULL) {
2508 		asoc->size_on_reasm_queue = 0;
2509 		asoc->cnt_on_reasm_queue = 0;
2510 		return;
2511 	}
2512 	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2513 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2514 	    ((nxt_todel == chk->rec.data.stream_seq) ||
2515 	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2516 		/*
2517 		 * Yep the first one is here. We setup to start reception,
2518 		 * by backing down the TSN just in case we can't deliver.
2519 		 */
2520 
2521 		/*
2522 		 * Before we start though either all of the message should
2523 		 * Before we start, though, either all of the message should
2524 		 * be here, or enough of it (up to the socket buffer max /
2525 		 * partial delivery point) that something can be delivered.
2526 		if (stcb->sctp_socket) {
2527 			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
2528 			    stcb->sctp_ep->partial_delivery_point);
2529 		} else {
2530 			pd_point = stcb->sctp_ep->partial_delivery_point;
2531 		}
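		/*
		 * pd_point is the partial-delivery threshold: we only start
		 * the PD-API when the whole message is on the reassembly
		 * queue, or when at least pd_point bytes (the smaller of the
		 * socket buffer limit and the endpoint's configured
		 * partial_delivery_point) have accumulated.
		 */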
2532 		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2533 			asoc->fragmented_delivery_inprogress = 1;
2534 			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2535 			asoc->str_of_pdapi = chk->rec.data.stream_number;
2536 			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2537 			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2538 			asoc->fragment_flags = chk->rec.data.rcv_flags;
2539 			sctp_service_reassembly(stcb, asoc);
2540 			if (asoc->fragmented_delivery_inprogress == 0) {
2541 				goto doit_again;
2542 			}
2543 		}
2544 	}
2545 }
2546 
2547 int
2548 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2549     struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2550     struct sctp_nets *net, uint32_t * high_tsn)
2551 {
2552 	struct sctp_data_chunk *ch, chunk_buf;
2553 	struct sctp_association *asoc;
2554 	int num_chunks = 0;	/* number of data chunks processed */
2555 	int stop_proc = 0;
2556 	int chk_length, break_flag, last_chunk;
2557 	int abort_flag = 0, was_a_gap;
2558 	struct mbuf *m;
2559 	uint32_t highest_tsn;
2560 
2561 	/* set the rwnd */
2562 	sctp_set_rwnd(stcb, &stcb->asoc);
2563 
2564 	m = *mm;
2565 	SCTP_TCB_LOCK_ASSERT(stcb);
2566 	asoc = &stcb->asoc;
2567 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2568 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2569 	} else {
2570 		highest_tsn = asoc->highest_tsn_inside_map;
2571 	}
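	/*
	 * Remember whether a gap existed before this packet; the
	 * transition from "was a gap" to "no gap" is one of the triggers
	 * sctp_sack_check() uses to send an immediate SACK.
	 */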
2572 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2573 	/*
2574 	 * setup where we got the last DATA packet from for any SACK that
2575 	 * may need to go out. Don't bump the net. This is done ONLY when a
2576 	 * chunk is assigned.
2577 	 */
2578 	asoc->last_data_chunk_from = net;
2579 
2580 	/*-
2581 	 * Now before we proceed we must figure out if this is a wasted
2582 	 * cluster... i.e. it is a small packet sent in and yet the driver
2583 	 * underneath allocated a full cluster for it. If so we must copy it
2584 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2585 	 * with cluster starvation. Note for __Panda__ we don't do this
2586 	 * since it has clusters all the way down to 64 bytes.
2587 	 */
2588 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2589 		/* we only handle mbufs that are singletons, not chains */
2590 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2591 		if (m) {
2592 			/* ok, let's see if we can copy the data up */
2593 			caddr_t *from, *to;
2594 
2595 			/* get the pointers and copy */
2596 			to = mtod(m, caddr_t *);
2597 			from = mtod((*mm), caddr_t *);
2598 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2599 			/* copy the length and free up the old */
2600 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2601 			sctp_m_freem(*mm);
2602 			/* success, copied back */
2603 			*mm = m;
2604 		} else {
2605 			/* We are in trouble in the mbuf world .. yikes */
2606 			m = *mm;
2607 		}
2608 	}
2609 	/* get pointer to the first chunk header */
2610 	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2611 	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2612 	if (ch == NULL) {
2613 		return (1);
2614 	}
2615 	/*
2616 	 * process all DATA chunks...
2617 	 */
2618 	*high_tsn = asoc->cumulative_tsn;
2619 	break_flag = 0;
2620 	asoc->data_pkts_seen++;
2621 	while (stop_proc == 0) {
2622 		/* validate chunk length */
2623 		chk_length = ntohs(ch->ch.chunk_length);
2624 		if (length - *offset < chk_length) {
2625 			/* all done, mutilated chunk */
2626 			stop_proc = 1;
2627 			continue;
2628 		}
2629 		if (ch->ch.chunk_type == SCTP_DATA) {
2630 			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2631 				/*
2632 				 * Need to send an abort since we had an
2633 				 * invalid data chunk.
2634 				 */
2635 				struct mbuf *op_err;
2636 
2637 				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2638 				    0, M_DONTWAIT, 1, MT_DATA);
2639 
2640 				if (op_err) {
2641 					struct sctp_paramhdr *ph;
2642 					uint32_t *ippp;
2643 
2644 					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2645 					    (2 * sizeof(uint32_t));
2646 					ph = mtod(op_err, struct sctp_paramhdr *);
2647 					ph->param_type =
2648 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2649 					ph->param_length = htons(SCTP_BUF_LEN(op_err));
2650 					ippp = (uint32_t *) (ph + 1);
2651 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2652 					ippp++;
2653 					*ippp = asoc->cumulative_tsn;
2654 
2655 				}
2656 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2657 				sctp_abort_association(inp, stcb, m, iphlen, sh,
2658 				    op_err, 0, net->port);
2659 				return (2);
2660 			}
2661 #ifdef SCTP_AUDITING_ENABLED
2662 			sctp_audit_log(0xB1, 0);
2663 #endif
2664 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2665 				last_chunk = 1;
2666 			} else {
2667 				last_chunk = 0;
2668 			}
2669 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2670 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2671 			    last_chunk)) {
2672 				num_chunks++;
2673 			}
2674 			if (abort_flag)
2675 				return (2);
2676 
2677 			if (break_flag) {
2678 				/*
2679 				 * Set because of out of rwnd space and no
2680 				 * drop rep space left.
2681 				 */
2682 				stop_proc = 1;
2683 				continue;
2684 			}
2685 		} else {
2686 			/* not a data chunk in the data region */
2687 			switch (ch->ch.chunk_type) {
2688 			case SCTP_INITIATION:
2689 			case SCTP_INITIATION_ACK:
2690 			case SCTP_SELECTIVE_ACK:
2691 			case SCTP_NR_SELECTIVE_ACK:
2692 			case SCTP_HEARTBEAT_REQUEST:
2693 			case SCTP_HEARTBEAT_ACK:
2694 			case SCTP_ABORT_ASSOCIATION:
2695 			case SCTP_SHUTDOWN:
2696 			case SCTP_SHUTDOWN_ACK:
2697 			case SCTP_OPERATION_ERROR:
2698 			case SCTP_COOKIE_ECHO:
2699 			case SCTP_COOKIE_ACK:
2700 			case SCTP_ECN_ECHO:
2701 			case SCTP_ECN_CWR:
2702 			case SCTP_SHUTDOWN_COMPLETE:
2703 			case SCTP_AUTHENTICATION:
2704 			case SCTP_ASCONF_ACK:
2705 			case SCTP_PACKET_DROPPED:
2706 			case SCTP_STREAM_RESET:
2707 			case SCTP_FORWARD_CUM_TSN:
2708 			case SCTP_ASCONF:
2709 				/*
2710 				 * Now, what do we do with KNOWN chunks that
2711 				 * are NOT in the right place?
2712 				 *
2713 				 * For now, I do nothing but ignore them. We
2714 				 * may later want to add sysctl stuff to
2715 				 * switch out and do either an ABORT() or
2716 				 * possibly process them.
2717 				 */
2718 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2719 					struct mbuf *op_err;
2720 
2721 					op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2722 					sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
2723 					return (2);
2724 				}
2725 				break;
2726 			default:
2727 				/* unknown chunk type, use bit rules */
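				/*
				 * The two high-order bits of the chunk type
				 * encode how to treat an unrecognized chunk:
				 * bit 0x40 set means report it in an
				 * operational error, and bit 0x80 set means
				 * skip it and keep processing this packet
				 * (clear means stop here).
				 */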
2728 				if (ch->ch.chunk_type & 0x40) {
2729 					/* Add a error report to the queue */
2730 					struct mbuf *merr;
2731 					struct sctp_paramhdr *phd;
2732 
2733 					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2734 					if (merr) {
2735 						phd = mtod(merr, struct sctp_paramhdr *);
2736 						/*
2737 						 * We cheat and use param
2738 						 * type since we did not
2739 						 * bother to define an error
2740 						 * cause struct. They are
2741 						 * the same basic format
2742 						 * with different names.
2743 						 */
2744 						phd->param_type =
2745 						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
2746 						phd->param_length =
2747 						    htons(chk_length + sizeof(*phd));
2748 						SCTP_BUF_LEN(merr) = sizeof(*phd);
2749 						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_DONTWAIT);
2750 						if (SCTP_BUF_NEXT(merr)) {
2751 							if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL)) {
2752 								sctp_m_freem(merr);
2753 							} else {
2754 								sctp_queue_op_err(stcb, merr);
2755 							}
2756 						} else {
2757 							sctp_m_freem(merr);
2758 						}
2759 					}
2760 				}
2761 				if ((ch->ch.chunk_type & 0x80) == 0) {
2762 					/* discard the rest of this packet */
2763 					stop_proc = 1;
2764 				}	/* else skip this bad chunk and
2765 					 * continue... */
2766 				break;
2767 			}	/* switch of chunk type */
2768 		}
2769 		*offset += SCTP_SIZE32(chk_length);
2770 		if ((*offset >= length) || stop_proc) {
2771 			/* no more data left in the mbuf chain */
2772 			stop_proc = 1;
2773 			continue;
2774 		}
2775 		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2776 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2777 		if (ch == NULL) {
2778 			*offset = length;
2779 			stop_proc = 1;
2780 			continue;
2781 		}
2782 	}
2783 	if (break_flag) {
2784 		/*
2785 		 * we need to report rwnd overrun drops.
2786 		 */
2787 		sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2788 	}
2789 	if (num_chunks) {
2790 		/*
2791 		 * Did we get data, if so update the time for auto-close and
2792 		 * give peer credit for being alive.
2793 		 */
2794 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2795 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2796 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2797 			    stcb->asoc.overall_error_count,
2798 			    0,
2799 			    SCTP_FROM_SCTP_INDATA,
2800 			    __LINE__);
2801 		}
2802 		stcb->asoc.overall_error_count = 0;
2803 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2804 	}
2805 	/* now service all of the reassm queue if needed */
2806 	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2807 		sctp_service_queues(stcb, asoc);
2808 
2809 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2810 		/* Assure that we ack right away */
2811 		stcb->asoc.send_sack = 1;
2812 	}
2813 	/* Start a sack timer or QUEUE a SACK for sending */
2814 	sctp_sack_check(stcb, was_a_gap);
2815 	return (0);
2816 }
2817 
2818 static int
2819 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2820     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2821     int *num_frs,
2822     uint32_t * biggest_newly_acked_tsn,
2823     uint32_t * this_sack_lowest_newack,
2824     int *rto_ok)
2825 {
2826 	struct sctp_tmit_chunk *tp1;
2827 	unsigned int theTSN;
2828 	int j, wake_him = 0, circled = 0;
2829 
2830 	/* Recover the tp1 we last saw */
2831 	tp1 = *p_tp1;
2832 	if (tp1 == NULL) {
2833 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2834 	}
2835 	for (j = frag_strt; j <= frag_end; j++) {
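		/*
		 * Gap ack block offsets are relative to the cumulative TSN
		 * ack point, so offset j covers TSN last_tsn + j.
		 */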
2836 		theTSN = j + last_tsn;
2837 		while (tp1) {
2838 			if (tp1->rec.data.doing_fast_retransmit)
2839 				(*num_frs) += 1;
2840 
2841 			/*-
2842 			 * CMT: CUCv2 algorithm. For each TSN being
2843 			 * processed from the sent queue, track the
2844 			 * next expected pseudo-cumack, or
2845 			 * rtx_pseudo_cumack, if required. Separate
2846 			 * cumack trackers for first transmissions,
2847 			 * and retransmissions.
2848 			 */
2849 			if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2850 			    (tp1->snd_count == 1)) {
2851 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2852 				tp1->whoTo->find_pseudo_cumack = 0;
2853 			}
2854 			if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2855 			    (tp1->snd_count > 1)) {
2856 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2857 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2858 			}
2859 			if (tp1->rec.data.TSN_seq == theTSN) {
2860 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2861 					/*-
2862 					 * must be held until
2863 					 * cum-ack passes
2864 					 */
2865 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2866 						/*-
2867 						 * If it is less than RESEND, it is
2868 						 * now no longer in flight.
2869 						 * Higher values may already be set
2870 						 * via previous Gap Ack Blocks...
2871 						 * i.e. ACKED or RESEND.
2872 						 */
2873 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2874 						    *biggest_newly_acked_tsn)) {
2875 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2876 						}
2877 						/*-
2878 						 * CMT: SFR algo (and HTNA) - set
2879 						 * saw_newack to 1 for dest being
2880 						 * newly acked. update
2881 						 * this_sack_highest_newack if
2882 						 * appropriate.
2883 						 */
2884 						if (tp1->rec.data.chunk_was_revoked == 0)
2885 							tp1->whoTo->saw_newack = 1;
2886 
2887 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2888 						    tp1->whoTo->this_sack_highest_newack)) {
2889 							tp1->whoTo->this_sack_highest_newack =
2890 							    tp1->rec.data.TSN_seq;
2891 						}
2892 						/*-
2893 						 * CMT DAC algo: also update
2894 						 * this_sack_lowest_newack
2895 						 */
2896 						if (*this_sack_lowest_newack == 0) {
2897 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2898 								sctp_log_sack(*this_sack_lowest_newack,
2899 								    last_tsn,
2900 								    tp1->rec.data.TSN_seq,
2901 								    0,
2902 								    0,
2903 								    SCTP_LOG_TSN_ACKED);
2904 							}
2905 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2906 						}
2907 						/*-
2908 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2909 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2910 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2911 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2912 						 * Separate pseudo_cumack trackers for first transmissions and
2913 						 * retransmissions.
2914 						 */
2915 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2916 							if (tp1->rec.data.chunk_was_revoked == 0) {
2917 								tp1->whoTo->new_pseudo_cumack = 1;
2918 							}
2919 							tp1->whoTo->find_pseudo_cumack = 1;
2920 						}
2921 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2922 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2923 						}
2924 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2925 							if (tp1->rec.data.chunk_was_revoked == 0) {
2926 								tp1->whoTo->new_pseudo_cumack = 1;
2927 							}
2928 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2929 						}
2930 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2931 							sctp_log_sack(*biggest_newly_acked_tsn,
2932 							    last_tsn,
2933 							    tp1->rec.data.TSN_seq,
2934 							    frag_strt,
2935 							    frag_end,
2936 							    SCTP_LOG_TSN_ACKED);
2937 						}
2938 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2939 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2940 							    tp1->whoTo->flight_size,
2941 							    tp1->book_size,
2942 							    (uintptr_t) tp1->whoTo,
2943 							    tp1->rec.data.TSN_seq);
2944 						}
2945 						sctp_flight_size_decrease(tp1);
2946 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2947 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2948 							    tp1);
2949 						}
2950 						sctp_total_flight_decrease(stcb, tp1);
2951 
2952 						tp1->whoTo->net_ack += tp1->send_size;
2953 						if (tp1->snd_count < 2) {
2954 							/*-
2955 							 * True non-retransmitted chunk
2956 							 */
2957 							tp1->whoTo->net_ack2 += tp1->send_size;
2958 
2959 							/*-
2960 							 * update RTO too ?
2961 							 */
2962 							if (tp1->do_rtt) {
2963 								if (*rto_ok) {
2964 									tp1->whoTo->RTO =
2965 									    sctp_calculate_rto(stcb,
2966 									    &stcb->asoc,
2967 									    tp1->whoTo,
2968 									    &tp1->sent_rcv_time,
2969 									    sctp_align_safe_nocopy,
2970 									    SCTP_RTT_FROM_DATA);
2971 									*rto_ok = 0;
2972 								}
2973 								if (tp1->whoTo->rto_needed == 0) {
2974 									tp1->whoTo->rto_needed = 1;
2975 								}
2976 								tp1->do_rtt = 0;
2977 							}
2978 						}
2979 					}
2980 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2981 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2982 						    stcb->asoc.this_sack_highest_gap)) {
2983 							stcb->asoc.this_sack_highest_gap =
2984 							    tp1->rec.data.TSN_seq;
2985 						}
2986 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2987 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2988 #ifdef SCTP_AUDITING_ENABLED
2989 							sctp_audit_log(0xB2,
2990 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2991 #endif
2992 						}
2993 					}
2994 					/*-
2995 					 * All chunks NOT UNSENT fall through here and are marked
2996 					 * (leave PR-SCTP ones that are to skip alone though)
2997 					 */
2998 					if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
2999 						tp1->sent = SCTP_DATAGRAM_MARKED;
3000 
3001 					if (tp1->rec.data.chunk_was_revoked) {
3002 						/* deflate the cwnd */
3003 						tp1->whoTo->cwnd -= tp1->book_size;
3004 						tp1->rec.data.chunk_was_revoked = 0;
3005 					}
3006 					/* NR Sack code here */
3007 					if (nr_sacking) {
3008 						if (tp1->data) {
3009 							/*
3010 							 * sa_ignore
3011 							 * NO_NULL_CHK
3012 							 */
3013 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3014 							sctp_m_freem(tp1->data);
3015 							tp1->data = NULL;
3016 						}
3017 						wake_him++;
3018 					}
3019 				}
3020 				break;
3021 			}	/* if (tp1->TSN_seq == theTSN) */
3022 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
3023 				break;
3024 			}
3025 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3026 			if ((tp1 == NULL) && (circled == 0)) {
3027 				circled++;
3028 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3029 			}
3030 		}		/* end while (tp1) */
3031 		if (tp1 == NULL) {
3032 			circled = 0;
3033 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3034 		}
3035 		/* In case the fragments were not in order we must reset */
3036 	}			/* end for (j = frag_strt) */
3037 	*p_tp1 = tp1;
3038 	return (wake_him);	/* Return value only used for nr-sack */
3039 }
3040 
3041 
3042 static int
3043 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3044     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3045     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3046     int num_seg, int num_nr_seg, int *rto_ok)
3047 {
3048 	struct sctp_gap_ack_block *frag, block;
3049 	struct sctp_tmit_chunk *tp1;
3050 	int i;
3051 	int num_frs = 0;
3052 	int chunk_freed;
3053 	int non_revocable;
3054 	uint16_t frag_strt, frag_end, prev_frag_end;
3055 
3056 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3057 	prev_frag_end = 0;
3058 	chunk_freed = 0;
3059 
3060 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3061 		if (i == num_seg) {
3062 			prev_frag_end = 0;
3063 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3064 		}
3065 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3066 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3067 		*offset += sizeof(block);
3068 		if (frag == NULL) {
3069 			return (chunk_freed);
3070 		}
3071 		frag_strt = ntohs(frag->start);
3072 		frag_end = ntohs(frag->end);
3073 
3074 		if (frag_strt > frag_end) {
3075 			/* This gap report is malformed, skip it. */
3076 			continue;
3077 		}
3078 		if (frag_strt <= prev_frag_end) {
3079 			/* This gap report is not in order, so restart. */
3080 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3081 		}
3082 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3083 			*biggest_tsn_acked = last_tsn + frag_end;
3084 		}
3085 		if (i < num_seg) {
3086 			non_revocable = 0;
3087 		} else {
3088 			non_revocable = 1;
3089 		}
3090 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3091 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3092 		    this_sack_lowest_newack, rto_ok)) {
3093 			chunk_freed = 1;
3094 		}
3095 		prev_frag_end = frag_end;
3096 	}
3097 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3098 		if (num_frs)
3099 			sctp_log_fr(*biggest_tsn_acked,
3100 			    *biggest_newly_acked_tsn,
3101 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3102 	}
3103 	return (chunk_freed);
3104 }
3105 
3106 static void
3107 sctp_check_for_revoked(struct sctp_tcb *stcb,
3108     struct sctp_association *asoc, uint32_t cumack,
3109     uint32_t biggest_tsn_acked)
3110 {
3111 	struct sctp_tmit_chunk *tp1;
3112 	int tot_revoked = 0;
3113 
3114 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3115 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3116 			/*
3117 			 * ok this guy is either ACKED or MARKED. If it is
3118 			 * ACKED it has been previously acked but not this
3119 			 * time, i.e. revoked. If it is MARKED it was ACK'ed
3120 			 * again.
3121 			 */
3122 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3123 				break;
3124 			}
3125 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3126 				/* it has been revoked */
3127 				tp1->sent = SCTP_DATAGRAM_SENT;
3128 				tp1->rec.data.chunk_was_revoked = 1;
3129 				/*
3130 				 * We must add this stuff back in to assure
3131 				 * timers and such get started.
3132 				 */
3133 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3134 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3135 					    tp1->whoTo->flight_size,
3136 					    tp1->book_size,
3137 					    (uintptr_t) tp1->whoTo,
3138 					    tp1->rec.data.TSN_seq);
3139 				}
3140 				sctp_flight_size_increase(tp1);
3141 				sctp_total_flight_increase(stcb, tp1);
3142 				/*
3143 				 * We inflate the cwnd to compensate for our
3144 				 * artificial inflation of the flight_size.
3145 				 */
3146 				tp1->whoTo->cwnd += tp1->book_size;
3147 				tot_revoked++;
3148 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3149 					sctp_log_sack(asoc->last_acked_seq,
3150 					    cumack,
3151 					    tp1->rec.data.TSN_seq,
3152 					    0,
3153 					    0,
3154 					    SCTP_LOG_TSN_REVOKED);
3155 				}
3156 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3157 				/* it has been re-acked in this SACK */
3158 				tp1->sent = SCTP_DATAGRAM_ACKED;
3159 			}
3160 		}
3161 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3162 			break;
3163 	}
3164 }
3165 
3166 
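/*
 * Strike pass: every chunk on the sent queue that lies below the
 * biggest TSN covered by this SACK, yet was not itself acked, takes a
 * strike (tp1->sent is incremented toward SCTP_DATAGRAM_RESEND). Once
 * tp1->sent reaches SCTP_DATAGRAM_RESEND (nominally after three missed
 * reports, matching RFC 4960's fast retransmit rule) the chunk is
 * marked for fast retransmission and an alternate destination may be
 * picked for it.
 */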
3167 static void
3168 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3169     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3170 {
3171 	struct sctp_tmit_chunk *tp1;
3172 	int strike_flag = 0;
3173 	struct timeval now;
3174 	int tot_retrans = 0;
3175 	uint32_t sending_seq;
3176 	struct sctp_nets *net;
3177 	int num_dests_sacked = 0;
3178 
3179 	/*
3180 	 * select the sending_seq, this is either the next thing ready to be
3181 	 * Select the sending_seq: this is either the next thing ready to be
3182 	 * sent but not transmitted, OR the next seq we will assign.
3183 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3184 	if (tp1 == NULL) {
3185 		sending_seq = asoc->sending_seq;
3186 	} else {
3187 		sending_seq = tp1->rec.data.TSN_seq;
3188 	}
3189 
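	/*
	 * sending_seq is later stored in rec.data.fast_retran_tsn when a
	 * chunk is fast-retransmitted: a chunk that has already done a FR
	 * may only be struck again once newly acked TSNs pass that point.
	 */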
3190 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3191 	if ((asoc->sctp_cmt_on_off > 0) &&
3192 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3193 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3194 			if (net->saw_newack)
3195 				num_dests_sacked++;
3196 		}
3197 	}
3198 	if (stcb->asoc.peer_supports_prsctp) {
3199 		(void)SCTP_GETTIME_TIMEVAL(&now);
3200 	}
3201 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3202 		strike_flag = 0;
3203 		if (tp1->no_fr_allowed) {
3204 			/* this one had a timeout or something */
3205 			continue;
3206 		}
3207 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3208 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3209 				sctp_log_fr(biggest_tsn_newly_acked,
3210 				    tp1->rec.data.TSN_seq,
3211 				    tp1->sent,
3212 				    SCTP_FR_LOG_CHECK_STRIKE);
3213 		}
3214 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3215 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3216 			/* done */
3217 			break;
3218 		}
3219 		if (stcb->asoc.peer_supports_prsctp) {
3220 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3221 				/* Is it expired? */
3222 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3223 					/* Yes so drop it */
3224 					if (tp1->data != NULL) {
3225 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3226 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3227 						    SCTP_SO_NOT_LOCKED);
3228 					}
3229 					continue;
3230 				}
3231 			}
3232 		}
3233 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3234 			/* we are beyond the tsn in the sack  */
3235 			break;
3236 		}
3237 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3238 			/* either a RESEND, ACKED, or MARKED */
3239 			/* skip */
3240 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3241 				/* Continue striking FWD-TSN chunks */
3242 				tp1->rec.data.fwd_tsn_cnt++;
3243 			}
3244 			continue;
3245 		}
3246 		/*
3247 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3248 		 */
3249 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3250 			/*
3251 			 * No new acks were received for data sent to this
3252 			 * dest. Therefore, according to the SFR algo for
3253 			 * CMT, no data sent to this dest can be marked for
3254 			 * FR using this SACK.
3255 			 */
3256 			continue;
3257 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3258 		    tp1->whoTo->this_sack_highest_newack)) {
3259 			/*
3260 			 * CMT: New acks were received for data sent to
3261 			 * this dest. But no new acks were seen for data
3262 			 * sent after tp1. Therefore, according to the SFR
3263 			 * algo for CMT, tp1 cannot be marked for FR using
3264 			 * this SACK. This step covers part of the DAC algo
3265 			 * and the HTNA algo as well.
3266 			 */
3267 			continue;
3268 		}
3269 		/*
3270 		 * Here we check to see if we have already done a FR
3271 		 * and if so we see if the biggest TSN we saw in the sack is
3272 		 * smaller than the recovery point. If so we don't strike
3273 		 * the tsn... otherwise we CAN strike the TSN.
3274 		 */
3275 		/*
3276 		 * @@@ JRI: Check for CMT if (accum_moved &&
3277 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3278 		 * 0)) {
3279 		 */
3280 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3281 			/*
3282 			 * Strike the TSN if in fast-recovery and cum-ack
3283 			 * moved.
3284 			 */
3285 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3286 				sctp_log_fr(biggest_tsn_newly_acked,
3287 				    tp1->rec.data.TSN_seq,
3288 				    tp1->sent,
3289 				    SCTP_FR_LOG_STRIKE_CHUNK);
3290 			}
3291 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3292 				tp1->sent++;
3293 			}
3294 			if ((asoc->sctp_cmt_on_off > 0) &&
3295 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3296 				/*
3297 				 * CMT DAC algorithm: If SACK flag is set to
3298 				 * 0, then lowest_newack test will not pass
3299 				 * because it would have been set to the
3300 				 * cumack earlier. If it is not already to
3301 				 * be rtx'd, this is not a mixed sack, and
3302 				 * tp1 is not between two sacked TSNs, then mark by
3303 				 * one more. NOTE that we are marking by one
3304 				 * additional time since the SACK DAC flag
3305 				 * indicates that two packets have been
3306 				 * received after this missing TSN.
3307 				 */
3308 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3309 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3310 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3311 						sctp_log_fr(16 + num_dests_sacked,
3312 						    tp1->rec.data.TSN_seq,
3313 						    tp1->sent,
3314 						    SCTP_FR_LOG_STRIKE_CHUNK);
3315 					}
3316 					tp1->sent++;
3317 				}
3318 			}
3319 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3320 		    (asoc->sctp_cmt_on_off == 0)) {
3321 			/*
3322 			 * For those that have done a FR we must take
3323 			 * special consideration if we strike. I.e the
3324 			 * biggest_newly_acked must be higher than the
3325 			 * sending_seq at the time we did the FR.
3326 			 */
3327 			if (
3328 #ifdef SCTP_FR_TO_ALTERNATE
3329 			/*
3330 			 * If FR's go to new networks, then we must only do
3331 			 * this for singly homed asoc's. However if the FR's
3332 			 * go to the same network (Armando's work) then it's
3333 			 * OK to FR multiple times.
3334 			 */
3335 			    (asoc->numnets < 2)
3336 #else
3337 			    (1)
3338 #endif
3339 			    ) {
3340 
3341 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3342 				    tp1->rec.data.fast_retran_tsn)) {
3343 					/*
3344 					 * Strike the TSN, since this ack is
3345 					 * beyond where things were when we
3346 					 * did a FR.
3347 					 */
3348 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3349 						sctp_log_fr(biggest_tsn_newly_acked,
3350 						    tp1->rec.data.TSN_seq,
3351 						    tp1->sent,
3352 						    SCTP_FR_LOG_STRIKE_CHUNK);
3353 					}
3354 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3355 						tp1->sent++;
3356 					}
3357 					strike_flag = 1;
3358 					if ((asoc->sctp_cmt_on_off > 0) &&
3359 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3360 						/*
3361 						 * CMT DAC algorithm: If
3362 						 * SACK flag is set to 0,
3363 						 * then lowest_newack test
3364 						 * will not pass because it
3365 						 * would have been set to
3366 						 * the cumack earlier. If
3367 						 * it is not already to be
3368 						 * rtx'd, this is not a
3369 						 * mixed sack, and tp1 is not between two
3370 						 * sacked TSNs, then mark by
3371 						 * one more. NOTE that we
3372 						 * are marking by one
3373 						 * additional time since the
3374 						 * SACK DAC flag indicates
3375 						 * that two packets have
3376 						 * been received after this
3377 						 * missing TSN.
3378 						 */
3379 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3380 						    (num_dests_sacked == 1) &&
3381 						    SCTP_TSN_GT(this_sack_lowest_newack,
3382 						    tp1->rec.data.TSN_seq)) {
3383 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3384 								sctp_log_fr(32 + num_dests_sacked,
3385 								    tp1->rec.data.TSN_seq,
3386 								    tp1->sent,
3387 								    SCTP_FR_LOG_STRIKE_CHUNK);
3388 							}
3389 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3390 								tp1->sent++;
3391 							}
3392 						}
3393 					}
3394 				}
3395 			}
3396 			/*
3397 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3398 			 * algo covers HTNA.
3399 			 */
3400 		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3401 		    biggest_tsn_newly_acked)) {
3402 			/*
3403 			 * We don't strike these: this is the HTNA
3404 			 * algorithm, i.e. we don't strike if our TSN is
3405 			 * larger than the Highest TSN Newly Acked.
3406 			 */
3407 			;
3408 		} else {
3409 			/* Strike the TSN */
3410 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3411 				sctp_log_fr(biggest_tsn_newly_acked,
3412 				    tp1->rec.data.TSN_seq,
3413 				    tp1->sent,
3414 				    SCTP_FR_LOG_STRIKE_CHUNK);
3415 			}
3416 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3417 				tp1->sent++;
3418 			}
3419 			if ((asoc->sctp_cmt_on_off > 0) &&
3420 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3421 				/*
3422 				 * CMT DAC algorithm: If SACK flag is set to
3423 				 * 0, then lowest_newack test will not pass
3424 				 * because it would have been set to the
3425 				 * cumack earlier. If it is not already to be
3426 				 * rtx'd, this is not a mixed sack, and tp1 is
3427 				 * not between two sacked TSNs, then mark by
3428 				 * one more. NOTE that we are marking by one
3429 				 * additional time since the SACK DAC flag
3430 				 * indicates that two packets have been
3431 				 * received after this missing TSN.
3432 				 */
3433 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3434 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3435 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3436 						sctp_log_fr(48 + num_dests_sacked,
3437 						    tp1->rec.data.TSN_seq,
3438 						    tp1->sent,
3439 						    SCTP_FR_LOG_STRIKE_CHUNK);
3440 					}
3441 					tp1->sent++;
3442 				}
3443 			}
3444 		}
3445 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3446 			struct sctp_nets *alt;
3447 
3448 			/* fix counts and things */
3449 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3450 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3451 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3452 				    tp1->book_size,
3453 				    (uintptr_t) tp1->whoTo,
3454 				    tp1->rec.data.TSN_seq);
3455 			}
3456 			if (tp1->whoTo) {
3457 				tp1->whoTo->net_ack++;
3458 				sctp_flight_size_decrease(tp1);
3459 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3460 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3461 					    tp1);
3462 				}
3463 			}
3464 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3465 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3466 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3467 			}
3468 			/* add back to the rwnd */
3469 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3470 
3471 			/* remove from the total flight */
3472 			sctp_total_flight_decrease(stcb, tp1);
3473 
3474 			if ((stcb->asoc.peer_supports_prsctp) &&
3475 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3476 				/*
3477 				 * Has it been retransmitted tv_sec times? -
3478 				 * we store the retran count there.
3479 				 */
3480 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3481 					/* Yes, so drop it */
3482 					if (tp1->data != NULL) {
3483 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3484 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3485 						    SCTP_SO_NOT_LOCKED);
3486 					}
3487 					/* Make sure to flag we had a FR */
3488 					tp1->whoTo->net_ack++;
3489 					continue;
3490 				}
3491 			}
3492 			/* printf("OK, we are now ready to FR this guy\n"); */
3493 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3494 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3495 				    0, SCTP_FR_MARKED);
3496 			}
3497 			if (strike_flag) {
3498 				/* This is a subsequent FR */
3499 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3500 			}
3501 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3502 			if (asoc->sctp_cmt_on_off > 0) {
3503 				/*
3504 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3505 				 * If CMT is being used, then pick dest with
3506 				 * largest ssthresh for any retransmission.
3507 				 */
3508 				tp1->no_fr_allowed = 1;
3509 				alt = tp1->whoTo;
3510 				/* sa_ignore NO_NULL_CHK */
3511 				if (asoc->sctp_cmt_pf > 0) {
3512 					/*
3513 					 * JRS 5/18/07 - If CMT PF is on,
3514 					 * use the PF version of
3515 					 * find_alt_net()
3516 					 */
3517 					alt = sctp_find_alternate_net(stcb, alt, 2);
3518 				} else {
3519 					/*
3520 					 * JRS 5/18/07 - If only CMT is on,
3521 					 * use the CMT version of
3522 					 * find_alt_net()
3523 					 */
3524 					/* sa_ignore NO_NULL_CHK */
3525 					alt = sctp_find_alternate_net(stcb, alt, 1);
3526 				}
3527 				if (alt == NULL) {
3528 					alt = tp1->whoTo;
3529 				}
3530 				/*
3531 				 * CUCv2: If a different dest is picked for
3532 				 * the retransmission, then new
3533 				 * (rtx-)pseudo_cumack needs to be tracked
3534 				 * for orig dest. Let CUCv2 track new (rtx-)
3535 				 * pseudo-cumack always.
3536 				 */
3537 				if (tp1->whoTo) {
3538 					tp1->whoTo->find_pseudo_cumack = 1;
3539 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3540 				}
3541 			} else {/* CMT is OFF */
3542 
3543 #ifdef SCTP_FR_TO_ALTERNATE
3544 				/* Can we find an alternate? */
3545 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3546 #else
3547 				/*
3548 				 * default behavior is to NOT retransmit
3549 				 * FR's to an alternate. Armando Caro's
3550 				 * paper details why.
3551 				 */
3552 				alt = tp1->whoTo;
3553 #endif
3554 			}
3555 
3556 			tp1->rec.data.doing_fast_retransmit = 1;
3557 			tot_retrans++;
3558 			/* mark the sending seq for possible subsequent FR's */
3559 			/*
3560 			 * printf("Marking TSN for FR new value %x\n",
3561 			 * (uint32_t)tp1->rec.data.TSN_seq);
3562 			 */
3563 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3564 				/*
3565 				 * If the send queue is empty then it's
3566 				 * the next sequence number that will be
3567 				 * assigned, so we subtract one from this to
3568 				 * get the one we last sent.
3569 				 */
3570 				tp1->rec.data.fast_retran_tsn = sending_seq;
3571 			} else {
3572 				/*
3573 				 * If there are chunks on the send queue
3574 				 * (unsent data that has made it from the
3575 				 * stream queues but not out the door), we
3576 				 * take the first one (which will have the
3577 				 * lowest TSN) and subtract one to get the
3578 				 * one we last sent.
3579 				 */
3580 				struct sctp_tmit_chunk *ttt;
3581 
3582 				ttt = TAILQ_FIRST(&asoc->send_queue);
3583 				tp1->rec.data.fast_retran_tsn =
3584 				    ttt->rec.data.TSN_seq;
3585 			}
3586 
3587 			if (tp1->do_rtt) {
3588 				/*
3589 			 * this guy had an RTO calculation pending on
3590 				 * it, cancel it
3591 				 */
3592 				if ((tp1->whoTo != NULL) &&
3593 				    (tp1->whoTo->rto_needed == 0)) {
3594 					tp1->whoTo->rto_needed = 1;
3595 				}
3596 				tp1->do_rtt = 0;
3597 			}
3598 			if (alt != tp1->whoTo) {
3599 				/* yes, there is an alternate. */
3600 				sctp_free_remote_addr(tp1->whoTo);
3601 				/* sa_ignore FREED_MEMORY */
3602 				tp1->whoTo = alt;
3603 				atomic_add_int(&alt->ref_count, 1);
3604 			}
3605 		}
3606 	}
3607 }
3608 
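/*
 * PR-SCTP (RFC 3758): walk the front of the sent queue and advance the
 * advanced peer ack point past chunks that have been abandoned (marked
 * SCTP_FORWARD_TSN_SKIP). The caller uses the returned chunk, if any,
 * to decide whether a FORWARD-TSN chunk must be sent so the peer can
 * move its cumulative ack past the skipped data.
 */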
3609 struct sctp_tmit_chunk *
3610 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3611     struct sctp_association *asoc)
3612 {
3613 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3614 	struct timeval now;
3615 	int now_filled = 0;
3616 
3617 	if (asoc->peer_supports_prsctp == 0) {
3618 		return (NULL);
3619 	}
3620 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3621 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3622 		    tp1->sent != SCTP_DATAGRAM_RESEND) {
3623 			/* no chance to advance, out of here */
3624 			break;
3625 		}
3626 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3627 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3628 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3629 				    asoc->advanced_peer_ack_point,
3630 				    tp1->rec.data.TSN_seq, 0, 0);
3631 			}
3632 		}
3633 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3634 			/*
3635 			 * We can't fwd-tsn past any that are reliable, i.e.
3636 			 * ones retransmitted until the asoc fails.
3637 			 */
3638 			break;
3639 		}
3640 		if (!now_filled) {
3641 			(void)SCTP_GETTIME_TIMEVAL(&now);
3642 			now_filled = 1;
3643 		}
3644 		/*
3645 		 * Now we have a chunk which is marked for another
3646 		 * retransmission to a PR-stream but has maybe run out of
3647 		 * its chances already, OR has been marked to skip now. Can
3648 		 * we skip it if it's a resend?
3649 		 */
3650 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3651 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3652 			/*
3653 			 * Now is this one marked for resend and its time is
3654 			 * now up?
3655 			 */
3656 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3657 				/* Yes so drop it */
3658 				if (tp1->data) {
3659 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3660 					    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3661 					    SCTP_SO_NOT_LOCKED);
3662 				}
3663 			} else {
3664 				/*
3665 				 * No, we are done when we hit one marked for
3666 				 * resend whose time has not expired.
3667 				 */
3668 				break;
3669 			}
3670 		}
3671 		/*
3672 		 * Ok now if this chunk is marked to drop it we can clean up
3673 		 * the chunk, advance our peer ack point and we can check
3674 		 * the next chunk.
3675 		 */
3676 		if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3677 			/* advance PeerAckPoint goes forward */
3678 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3679 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3680 				a_adv = tp1;
3681 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3682 				/* No update but we do save the chk */
3683 				a_adv = tp1;
3684 			}
3685 		} else {
3686 			/*
3687 			 * If it is still in RESEND we can advance no
3688 			 * further
3689 			 */
3690 			break;
3691 		}
3692 	}
3693 	return (a_adv);
3694 }
3695 
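/*
 * Sanity audit of the flight size bookkeeping: classify every chunk on
 * the sent queue by its send state and check the result against the
 * association totals. Under INVARIANTS a mismatch panics; otherwise it
 * is logged and reported to the caller, which can then rebuild the
 * counters.
 */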
3696 static int
3697 sctp_fs_audit(struct sctp_association *asoc)
3698 {
3699 	struct sctp_tmit_chunk *chk;
3700 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3701 	int entry_flight, entry_cnt, ret;
3702 
3703 	entry_flight = asoc->total_flight;
3704 	entry_cnt = asoc->total_flight_count;
3705 	ret = 0;
3706 
3707 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3708 		return (0);
3709 
3710 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3711 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3712 			printf("Chk TSN:%u size:%d inflight cnt:%d\n",
3713 			    chk->rec.data.TSN_seq,
3714 			    chk->send_size,
3715 			    chk->snd_count
3716 			    );
3717 			inflight++;
3718 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3719 			resend++;
3720 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3721 			inbetween++;
3722 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3723 			above++;
3724 		} else {
3725 			acked++;
3726 		}
3727 	}
3728 
3729 	if ((inflight > 0) || (inbetween > 0)) {
3730 #ifdef INVARIANTS
3731 		panic("Flight size-express incorrect? \n");
3732 #else
3733 		printf("asoc->total_flight:%d cnt:%d\n",
3734 		    entry_flight, entry_cnt);
3735 
3736 		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3737 		    inflight, inbetween, resend, above, acked);
3738 		ret = 1;
3739 #endif
3740 	}
3741 	return (ret);
3742 }
3743 
3744 
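/*
 * Window probe recovery: while the peer advertises a zero rwnd a probe
 * chunk is kept outstanding. Once the window reopens, the probe chunk
 * (unless it was already acked or abandoned) is pulled out of flight
 * and re-marked SCTP_DATAGRAM_RESEND so that it goes out again through
 * the normal retransmission path.
 */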
3745 static void
3746 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3747     struct sctp_association *asoc,
3748     struct sctp_tmit_chunk *tp1)
3749 {
3750 	tp1->window_probe = 0;
3751 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3752 		/* TSN was skipped or acked; we do NOT move it back. */
3753 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3754 		    tp1->whoTo->flight_size,
3755 		    tp1->book_size,
3756 		    (uintptr_t) tp1->whoTo,
3757 		    tp1->rec.data.TSN_seq);
3758 		return;
3759 	}
3760 	/* First setup this by shrinking flight */
3761 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3762 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3763 		    tp1);
3764 	}
3765 	sctp_flight_size_decrease(tp1);
3766 	sctp_total_flight_decrease(stcb, tp1);
3767 	/* Now mark for resend */
3768 	tp1->sent = SCTP_DATAGRAM_RESEND;
3769 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3770 
3771 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3772 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3773 		    tp1->whoTo->flight_size,
3774 		    tp1->book_size,
3775 		    (uintptr_t) tp1->whoTo,
3776 		    tp1->rec.data.TSN_seq);
3777 	}
3778 }
3779 
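/*
 * Express SACK path: the fast path taken when a SACK only moves the
 * cumulative ack forward, so just the cum-ack and the advertised rwnd
 * need processing. SACKs carrying gap ack blocks go through the full
 * sctp_handle_sack() slow path below.
 */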
3780 void
3781 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3782     uint32_t rwnd, int *abort_now, int ecne_seen)
3783 {
3784 	struct sctp_nets *net;
3785 	struct sctp_association *asoc;
3786 	struct sctp_tmit_chunk *tp1, *tp2;
3787 	uint32_t old_rwnd;
3788 	int win_probe_recovery = 0;
3789 	int win_probe_recovered = 0;
3790 	int j, done_once = 0;
3791 	int rto_ok = 1;
3792 
3793 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3794 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3795 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3796 	}
3797 	SCTP_TCB_LOCK_ASSERT(stcb);
3798 #ifdef SCTP_ASOCLOG_OF_TSNS
3799 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3800 	stcb->asoc.cumack_log_at++;
3801 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3802 		stcb->asoc.cumack_log_at = 0;
3803 	}
3804 #endif
3805 	asoc = &stcb->asoc;
3806 	old_rwnd = asoc->peers_rwnd;
3807 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3808 		/* old ack */
3809 		return;
3810 	} else if (asoc->last_acked_seq == cumack) {
3811 		/* Window update sack */
3812 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3813 		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3814 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3815 			/* SWS sender side engages */
3816 			asoc->peers_rwnd = 0;
3817 		}
3818 		if (asoc->peers_rwnd > old_rwnd) {
3819 			goto again;
3820 		}
3821 		return;
3822 	}
3823 	/* First setup for CC stuff */
3824 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3825 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3826 			/* Drag along the window_tsn for cwr's */
3827 			net->cwr_window_tsn = cumack;
3828 		}
3829 		net->prev_cwnd = net->cwnd;
3830 		net->net_ack = 0;
3831 		net->net_ack2 = 0;
3832 
3833 		/*
3834 		 * CMT: Reset CUC and Fast recovery algo variables before
3835 		 * SACK processing
3836 		 */
3837 		net->new_pseudo_cumack = 0;
3838 		net->will_exit_fast_recovery = 0;
3839 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3840 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3841 		}
3842 	}
3843 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3844 		uint32_t send_s;
3845 
3846 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3847 			tp1 = TAILQ_LAST(&asoc->sent_queue,
3848 			    sctpchunk_listhead);
3849 			send_s = tp1->rec.data.TSN_seq + 1;
3850 		} else {
3851 			send_s = asoc->sending_seq;
3852 		}
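		/*
		 * send_s is one past the highest TSN ever handed to this
		 * peer. A cumulative ack at or beyond it would acknowledge
		 * data we never sent, so it is treated as a protocol
		 * violation and the association is aborted.
		 */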
3853 		if (SCTP_TSN_GE(cumack, send_s)) {
3854 #ifndef INVARIANTS
3855 			struct mbuf *oper;
3856 
3857 #endif
3858 #ifdef INVARIANTS
3859 			panic("Impossible sack 1");
3860 #else
3861 
3862 			*abort_now = 1;
3863 			/* XXX */
3864 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3865 			    0, M_DONTWAIT, 1, MT_DATA);
3866 			if (oper) {
3867 				struct sctp_paramhdr *ph;
3868 				uint32_t *ippp;
3869 
3870 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3871 				    sizeof(uint32_t);
3872 				ph = mtod(oper, struct sctp_paramhdr *);
3873 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3874 				ph->param_length = htons(SCTP_BUF_LEN(oper));
3875 				ippp = (uint32_t *) (ph + 1);
3876 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3877 			}
3878 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3879 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
3880 			return;
3881 #endif
3882 		}
3883 	}
3884 	asoc->this_sack_highest_gap = cumack;
3885 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3886 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3887 		    stcb->asoc.overall_error_count,
3888 		    0,
3889 		    SCTP_FROM_SCTP_INDATA,
3890 		    __LINE__);
3891 	}
3892 	stcb->asoc.overall_error_count = 0;
3893 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3894 		/* process the new consecutive TSN first */
3895 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3896 			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3897 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3898 					printf("Warning, an unsent is now acked?\n");
3899 				}
3900 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3901 					/*
3902 					 * If it is less than ACKED, it is
3903 					 * now no-longer in flight. Higher
3904 					 * values may occur during marking
3905 					 */
3906 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3907 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3908 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3909 							    tp1->whoTo->flight_size,
3910 							    tp1->book_size,
3911 							    (uintptr_t) tp1->whoTo,
3912 							    tp1->rec.data.TSN_seq);
3913 						}
3914 						sctp_flight_size_decrease(tp1);
3915 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3916 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3917 							    tp1);
3918 						}
3919 						/* sa_ignore NO_NULL_CHK */
3920 						sctp_total_flight_decrease(stcb, tp1);
3921 					}
3922 					tp1->whoTo->net_ack += tp1->send_size;
3923 					if (tp1->snd_count < 2) {
3924 						/*
3925 						 * True non-retransmitted
3926 						 * chunk
3927 						 */
3928 						tp1->whoTo->net_ack2 +=
3929 						    tp1->send_size;
3930 
3931 						/* update RTO too? */
3932 						if (tp1->do_rtt) {
3933 							if (rto_ok) {
3934 								tp1->whoTo->RTO =
3935 								/* sa_ignore NO_NULL_CHK */
3940 								    sctp_calculate_rto(stcb,
3941 								    asoc, tp1->whoTo,
3942 								    &tp1->sent_rcv_time,
3943 								    sctp_align_safe_nocopy,
3944 								    SCTP_RTT_FROM_DATA);
3945 								rto_ok = 0;
3946 							}
3947 							if (tp1->whoTo->rto_needed == 0) {
3948 								tp1->whoTo->rto_needed = 1;
3949 							}
3950 							tp1->do_rtt = 0;
3951 						}
3952 					}
3953 					/*
3954 					 * CMT: CUCv2 algorithm. From the
3955 					 * cumack'd TSNs, for each TSN being
3956 					 * acked for the first time, set the
3957 					 * following variables for the
3958 					 * corresp destination.
3959 					 * new_pseudo_cumack will trigger a
3960 					 * cwnd update.
3961 					 * find_(rtx_)pseudo_cumack will
3962 					 * trigger search for the next
3963 					 * expected (rtx-)pseudo-cumack.
3964 					 */
3965 					tp1->whoTo->new_pseudo_cumack = 1;
3966 					tp1->whoTo->find_pseudo_cumack = 1;
3967 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3968 
3969 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3970 						/* sa_ignore NO_NULL_CHK */
3971 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3972 					}
3973 				}
3974 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3975 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3976 				}
3977 				if (tp1->rec.data.chunk_was_revoked) {
3978 					/* deflate the cwnd */
3979 					tp1->whoTo->cwnd -= tp1->book_size;
3980 					tp1->rec.data.chunk_was_revoked = 0;
3981 				}
3982 				tp1->sent = SCTP_DATAGRAM_ACKED;
3983 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3984 				if (tp1->data) {
3985 					/* sa_ignore NO_NULL_CHK */
3986 					sctp_free_bufspace(stcb, asoc, tp1, 1);
3987 					sctp_m_freem(tp1->data);
3988 					tp1->data = NULL;
3989 				}
3990 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3991 					sctp_log_sack(asoc->last_acked_seq,
3992 					    cumack,
3993 					    tp1->rec.data.TSN_seq,
3994 					    0,
3995 					    0,
3996 					    SCTP_LOG_FREE_SENT);
3997 				}
3998 				asoc->sent_queue_cnt--;
3999 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4000 			} else {
4001 				break;
4002 			}
4003 		}
4004 
4005 	}
4006 	/* sa_ignore NO_NULL_CHK */
4007 	if (stcb->sctp_socket) {
4008 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4009 		struct socket *so;
4010 
4011 #endif
4012 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4013 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4014 			/* sa_ignore NO_NULL_CHK */
4015 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4016 		}
4017 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4018 		so = SCTP_INP_SO(stcb->sctp_ep);
4019 		atomic_add_int(&stcb->asoc.refcnt, 1);
4020 		SCTP_TCB_UNLOCK(stcb);
4021 		SCTP_SOCKET_LOCK(so, 1);
4022 		SCTP_TCB_LOCK(stcb);
4023 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4024 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4025 			/* assoc was freed while we were unlocked */
4026 			SCTP_SOCKET_UNLOCK(so, 1);
4027 			return;
4028 		}
4029 #endif
4030 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4031 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4032 		SCTP_SOCKET_UNLOCK(so, 1);
4033 #endif
4034 	} else {
4035 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4036 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4037 		}
4038 	}
4039 
4040 	/* JRS - Use the congestion control given in the CC module */
4041 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4042 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4043 			if (net->net_ack2 > 0) {
4044 				/*
4045 				 * Karn's rule applies to clearing the error
4046 				 * count; this is optional.
4047 				 */
4048 				net->error_count = 0;
4049 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4050 					/* addr came good */
4051 					net->dest_state |= SCTP_ADDR_REACHABLE;
4052 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4053 					    SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED);
4054 				}
4055 				if (net == stcb->asoc.primary_destination) {
4056 					if (stcb->asoc.alternate) {
4057 						/*
4058 						 * release the alternate,
4059 						 * primary is good
4060 						 */
4061 						sctp_free_remote_addr(stcb->asoc.alternate);
4062 						stcb->asoc.alternate = NULL;
4063 					}
4064 				}
4065 				if (net->dest_state & SCTP_ADDR_PF) {
4066 					net->dest_state &= ~SCTP_ADDR_PF;
4067 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4068 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4069 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4070 					/* Done with this net */
4071 					net->net_ack = 0;
4072 				}
4073 				/* restore any doubled timers */
4074 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4075 				if (net->RTO < stcb->asoc.minrto) {
4076 					net->RTO = stcb->asoc.minrto;
4077 				}
4078 				if (net->RTO > stcb->asoc.maxrto) {
4079 					net->RTO = stcb->asoc.maxrto;
4080 				}
4081 			}
4082 		}
4083 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4084 	}
4085 	asoc->last_acked_seq = cumack;
4086 
4087 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4088 		/* nothing left in-flight */
4089 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4090 			net->flight_size = 0;
4091 			net->partial_bytes_acked = 0;
4092 		}
4093 		asoc->total_flight = 0;
4094 		asoc->total_flight_count = 0;
4095 	}
4096 	/* RWND update */
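	/*
	 * The usable peer window is the advertised rwnd minus everything
	 * still in flight, charging sctp_peer_chunk_oh bytes of overhead
	 * per outstanding chunk. For example (illustrative numbers): an
	 * rwnd of 64000 with 10 chunks of 1000 bytes in flight and a 256
	 * byte per-chunk overhead leaves 64000 - 10000 - 2560 = 51440.
	 */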
4097 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4098 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4099 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4100 		/* SWS sender side engages */
4101 		asoc->peers_rwnd = 0;
4102 	}
4103 	if (asoc->peers_rwnd > old_rwnd) {
4104 		win_probe_recovery = 1;
4105 	}
4106 	/* Now assure a timer where data is queued at */
4107 again:
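	/*
	 * Make sure a T3-rxt timer is running on every destination that
	 * still has data in flight, and stop it where nothing is
	 * outstanding (keeping it alive across a window probe).
	 */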
4108 	j = 0;
4109 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4110 		int to_ticks;
4111 
4112 		if (win_probe_recovery && (net->window_probe)) {
4113 			win_probe_recovered = 1;
4114 			/*
4115 			 * Find the first chunk that was used with the window
4116 			 * probe and put it back for (re)transmission
4117 			 */
4118 			/* sa_ignore FREED_MEMORY */
4119 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4120 				if (tp1->window_probe) {
4121 					/* move back to data send queue */
4122 					sctp_window_probe_recovery(stcb, asoc, tp1);
4123 					break;
4124 				}
4125 			}
4126 		}
4127 		if (net->RTO == 0) {
4128 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4129 		} else {
4130 			to_ticks = MSEC_TO_TICKS(net->RTO);
4131 		}
4132 		if (net->flight_size) {
4133 			j++;
4134 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4135 			    sctp_timeout_handler, &net->rxt_timer);
4136 			if (net->window_probe) {
4137 				net->window_probe = 0;
4138 			}
4139 		} else {
4140 			if (net->window_probe) {
4141 				/*
4142 				 * In window probes we must assure a timer
4143 				 * is still running there
4144 				 */
4145 				net->window_probe = 0;
4146 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4147 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4148 					    sctp_timeout_handler, &net->rxt_timer);
4149 				}
4150 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4151 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4152 				    stcb, net,
4153 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4154 			}
4155 		}
4156 	}
4157 	if ((j == 0) &&
4158 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4159 	    (asoc->sent_queue_retran_cnt == 0) &&
4160 	    (win_probe_recovered == 0) &&
4161 	    (done_once == 0)) {
4162 		/*
4163 		 * huh, this should not happen unless all packets are
4164 		 * PR-SCTP and marked to skip of course.
4165 		 */
4166 		if (sctp_fs_audit(asoc)) {
4167 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4168 				net->flight_size = 0;
4169 			}
4170 			asoc->total_flight = 0;
4171 			asoc->total_flight_count = 0;
4172 			asoc->sent_queue_retran_cnt = 0;
4173 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4174 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4175 					sctp_flight_size_increase(tp1);
4176 					sctp_total_flight_increase(stcb, tp1);
4177 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4178 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4179 				}
4180 			}
4181 		}
4182 		done_once = 1;
4183 		goto again;
4184 	}
4185 	/**********************************/
4186 	/* Now what about shutdown issues */
4187 	/**********************************/
4188 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4189 		/* nothing left on sendqueue.. consider done */
4190 		/* clean up */
4191 		if ((asoc->stream_queue_cnt == 1) &&
4192 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4193 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4194 		    (asoc->locked_on_sending)
4195 		    ) {
4196 			struct sctp_stream_queue_pending *sp;
4197 
4198 			/*
4199 			 * I may be in a state where we got all across... but
4200 			 * cannot write more due to a shutdown... we abort
4201 			 * since the user did not indicate EOR in this case.
4202 			 * The sp will be cleaned during free of the asoc.
4203 			 */
4204 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4205 			    sctp_streamhead);
4206 			if ((sp) && (sp->length == 0)) {
4207 				/* Let cleanup code purge it */
4208 				if (sp->msg_is_complete) {
4209 					asoc->stream_queue_cnt--;
4210 				} else {
4211 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4212 					asoc->locked_on_sending = NULL;
4213 					asoc->stream_queue_cnt--;
4214 				}
4215 			}
4216 		}
4217 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4218 		    (asoc->stream_queue_cnt == 0)) {
4219 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4220 				/* Need to abort here */
4221 				struct mbuf *oper;
4222 
4223 		abort_out_now:
4224 				*abort_now = 1;
4225 				/* XXX */
4226 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4227 				    0, M_DONTWAIT, 1, MT_DATA);
4228 				if (oper) {
4229 					struct sctp_paramhdr *ph;
4230 					uint32_t *ippp;
4231 
4232 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4233 					    sizeof(uint32_t);
4234 					ph = mtod(oper, struct sctp_paramhdr *);
4235 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4236 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4237 					ippp = (uint32_t *) (ph + 1);
4238 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4239 				}
4240 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4241 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4242 			} else {
4243 				struct sctp_nets *netp;
4244 
4245 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4246 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4247 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4248 				}
4249 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4250 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4251 				sctp_stop_timers_for_shutdown(stcb);
4252 				if (asoc->alternate) {
4253 					netp = asoc->alternate;
4254 				} else {
4255 					netp = asoc->primary_destination;
4256 				}
4257 				sctp_send_shutdown(stcb, netp);
4258 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4259 				    stcb->sctp_ep, stcb, netp);
4260 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4261 				    stcb->sctp_ep, stcb, netp);
4262 			}
4263 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4264 		    (asoc->stream_queue_cnt == 0)) {
4265 			struct sctp_nets *netp;
4266 
4267 			if (asoc->alternate) {
4268 				netp = asoc->alternate;
4269 			} else {
4270 				netp = asoc->primary_destination;
4271 			}
4272 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4273 				goto abort_out_now;
4274 			}
4275 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4276 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4277 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4278 			sctp_send_shutdown_ack(stcb, netp);
4279 			sctp_stop_timers_for_shutdown(stcb);
4280 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4281 			    stcb->sctp_ep, stcb, netp);
4282 		}
4283 	}
4284 	/*********************************************/
4285 	/* Here we perform PR-SCTP procedures        */
4286 	/* (section 4.2)                             */
4287 	/*********************************************/
4288 	/* C1. update advancedPeerAckPoint */
4289 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4290 		asoc->advanced_peer_ack_point = cumack;
4291 	}
4292 	/* PR-Sctp issues need to be addressed too */
4293 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4294 		struct sctp_tmit_chunk *lchk;
4295 		uint32_t old_adv_peer_ack_point;
4296 
4297 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4298 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4299 		/* C3. See if we need to send a Fwd-TSN */
4300 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4301 			/*
4302 			 * ISSUE with ECN, see FWD-TSN processing.
4303 			 */
4304 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4305 				send_forward_tsn(stcb, asoc);
4306 			} else if (lchk) {
4307 				/* try to FR fwd-tsn's that get lost too */
4308 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4309 					send_forward_tsn(stcb, asoc);
4310 				}
4311 			}
4312 		}
4313 		if (lchk) {
4314 			/* Assure a timer is up */
4315 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4316 			    stcb->sctp_ep, stcb, lchk->whoTo);
4317 		}
4318 	}
4319 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4320 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4321 		    rwnd,
4322 		    stcb->asoc.peers_rwnd,
4323 		    stcb->asoc.total_flight,
4324 		    stcb->asoc.total_output_queue_size);
4325 	}
4326 }
4327 
4328 void
4329 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4330     struct sctp_tcb *stcb,
4331     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4332     int *abort_now, uint8_t flags,
4333     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4334 {
4335 	struct sctp_association *asoc;
4336 	struct sctp_tmit_chunk *tp1, *tp2;
4337 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4338 	uint16_t wake_him = 0;
4339 	uint32_t send_s = 0;
4340 	long j;
4341 	int accum_moved = 0;
4342 	int will_exit_fast_recovery = 0;
4343 	uint32_t a_rwnd, old_rwnd;
4344 	int win_probe_recovery = 0;
4345 	int win_probe_recovered = 0;
4346 	struct sctp_nets *net = NULL;
4347 	int done_once;
4348 	int rto_ok = 1;
4349 	uint8_t reneged_all = 0;
4350 	uint8_t cmt_dac_flag;
4351 
4352 	/*
4353 	 * we take any chance we can to service our queues since we cannot
4354 	 * get awoken when the socket is read from :<
4355 	 */
4356 	/*
4357 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4358 	 * old sack, if so discard. 2) If there is nothing left in the send
4359 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4360 	 * too, update any rwnd change and verify no timers are running.
4361 	 * then return. 3) Process any new consecutive data i.e. cum-ack
4362 	 * moved; process these first and note that it moved. 4) Process any
4363 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4364 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4365 	 * sync up flightsizes and things, stop all timers and also check
4366 	 * for shutdown_pending state. If so then go ahead and send off the
4367 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4368 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4369 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4370 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4371 	 * if in shutdown_recv state.
4372 	 */
4373 	SCTP_TCB_LOCK_ASSERT(stcb);
4374 	/* CMT DAC algo */
4375 	this_sack_lowest_newack = 0;
4376 	SCTP_STAT_INCR(sctps_slowpath_sack);
4377 	last_tsn = cum_ack;
4378 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4379 #ifdef SCTP_ASOCLOG_OF_TSNS
4380 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4381 	stcb->asoc.cumack_log_at++;
4382 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4383 		stcb->asoc.cumack_log_at = 0;
4384 	}
4385 #endif
4386 	a_rwnd = rwnd;
4387 
4388 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4389 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4390 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4391 	}
4392 	old_rwnd = stcb->asoc.peers_rwnd;
4393 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4394 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4395 		    stcb->asoc.overall_error_count,
4396 		    0,
4397 		    SCTP_FROM_SCTP_INDATA,
4398 		    __LINE__);
4399 	}
4400 	stcb->asoc.overall_error_count = 0;
4401 	asoc = &stcb->asoc;
4402 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4403 		sctp_log_sack(asoc->last_acked_seq,
4404 		    cum_ack,
4405 		    0,
4406 		    num_seg,
4407 		    num_dup,
4408 		    SCTP_LOG_NEW_SACK);
4409 	}
4410 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4411 		uint16_t i;
4412 		uint32_t *dupdata, dblock;
4413 
4414 		for (i = 0; i < num_dup; i++) {
4415 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4416 			    sizeof(uint32_t), (uint8_t *) & dblock);
4417 			if (dupdata == NULL) {
4418 				break;
4419 			}
4420 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4421 		}
4422 	}
4423 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4424 		/* reality check */
4425 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4426 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4427 			    sctpchunk_listhead);
4428 			send_s = tp1->rec.data.TSN_seq + 1;
4429 		} else {
4430 			tp1 = NULL;
4431 			send_s = asoc->sending_seq;
4432 		}
4433 		if (SCTP_TSN_GE(cum_ack, send_s)) {
4434 			struct mbuf *oper;
4435 
4436 			/*
4437 			 * no way, we have not even sent this TSN out yet.
4438 			 * Peer is hopelessly messed up with us.
4439 			 */
4440 			printf("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4441 			    cum_ack, send_s);
4442 			if (tp1) {
4443 				printf("Got send_s from tsn:%x + 1 of tp1:%p\n",
4444 				    tp1->rec.data.TSN_seq, tp1);
4445 			}
4446 	hopeless_peer:
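			/*
			 * Build an operational error cause to send with the
			 * ABORT: a parameter header flagging a protocol
			 * violation, followed by a 32-bit code identifying
			 * the spot in this file that detected it.
			 */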
4447 			*abort_now = 1;
4448 			/* XXX */
4449 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4450 			    0, M_DONTWAIT, 1, MT_DATA);
4451 			if (oper) {
4452 				struct sctp_paramhdr *ph;
4453 				uint32_t *ippp;
4454 
4455 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4456 				    sizeof(uint32_t);
4457 				ph = mtod(oper, struct sctp_paramhdr *);
4458 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4459 				ph->param_length = htons(SCTP_BUF_LEN(oper));
4460 				ippp = (uint32_t *) (ph + 1);
4461 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4462 			}
4463 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4464 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4465 			return;
4466 		}
4467 	}
4468 	/**********************/
4469 	/* 1) check the range */
4470 	/**********************/
4471 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4472 		/* acking something behind */
4473 		return;
4474 	}
4475 	/* update the Rwnd of the peer */
4476 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4477 	    TAILQ_EMPTY(&asoc->send_queue) &&
4478 	    (asoc->stream_queue_cnt == 0)) {
4479 		/* nothing left on send/sent and strmq */
4480 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4481 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4482 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4483 		}
4484 		asoc->peers_rwnd = a_rwnd;
4485 		if (asoc->sent_queue_retran_cnt) {
4486 			asoc->sent_queue_retran_cnt = 0;
4487 		}
4488 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4489 			/* SWS sender side engages */
4490 			asoc->peers_rwnd = 0;
4491 		}
4492 		/* stop any timers */
4493 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4494 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4495 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4496 			net->partial_bytes_acked = 0;
4497 			net->flight_size = 0;
4498 		}
4499 		asoc->total_flight = 0;
4500 		asoc->total_flight_count = 0;
4501 		return;
4502 	}
4503 	/*
4504 	 * We init net_ack and net_ack2 to 0. These are used to track 2
4505 	 * things. The total byte count acked is tracked in net_ack AND
4506 	 * net_ack2 is used to track the total bytes acked that are
4507 	 * unambiguous and were never retransmitted. We track these on a per
4508 	 * destination address basis.
4509 	 */
4510 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4511 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4512 			/* Drag along the window_tsn for cwr's */
4513 			net->cwr_window_tsn = cum_ack;
4514 		}
4515 		net->prev_cwnd = net->cwnd;
4516 		net->net_ack = 0;
4517 		net->net_ack2 = 0;
4518 
4519 		/*
4520 		 * CMT: Reset CUC and Fast recovery algo variables before
4521 		 * SACK processing
4522 		 */
4523 		net->new_pseudo_cumack = 0;
4524 		net->will_exit_fast_recovery = 0;
4525 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4526 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4527 		}
4528 	}
4529 	/* process the new consecutive TSN first */
4530 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4531 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4532 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4533 				accum_moved = 1;
4534 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4535 					/*
4536 					 * If it is less than ACKED, it is
4537 					 * now no-longer in flight. Higher
4538 					 * values may occur during marking
4539 					 */
4540 					if ((tp1->whoTo->dest_state &
4541 					    SCTP_ADDR_UNCONFIRMED) &&
4542 					    (tp1->snd_count < 2)) {
4543 						/*
4544 						 * If there was no retran
4545 						 * and the address is
4546 						 * un-confirmed and we sent
4547 						 * there and are now
4548 						 * sacked... it's confirmed,
4549 						 * mark it so.
4550 						 */
4551 						tp1->whoTo->dest_state &=
4552 						    ~SCTP_ADDR_UNCONFIRMED;
4553 					}
4554 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4555 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4556 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4557 							    tp1->whoTo->flight_size,
4558 							    tp1->book_size,
4559 							    (uintptr_t) tp1->whoTo,
4560 							    tp1->rec.data.TSN_seq);
4561 						}
4562 						sctp_flight_size_decrease(tp1);
4563 						sctp_total_flight_decrease(stcb, tp1);
4564 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4565 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4566 							    tp1);
4567 						}
4568 					}
4569 					tp1->whoTo->net_ack += tp1->send_size;
4570 
4571 					/* CMT SFR and DAC algos */
4572 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4573 					tp1->whoTo->saw_newack = 1;
4574 
4575 					if (tp1->snd_count < 2) {
4576 						/*
4577 						 * True non-retransmitted
4578 						 * chunk
4579 						 */
4580 						tp1->whoTo->net_ack2 +=
4581 						    tp1->send_size;
4582 
4583 						/* update RTO too? */
4584 						if (tp1->do_rtt) {
4585 							if (rto_ok) {
4586 								tp1->whoTo->RTO =
4587 								    sctp_calculate_rto(stcb,
4588 								    asoc, tp1->whoTo,
4589 								    &tp1->sent_rcv_time,
4590 								    sctp_align_safe_nocopy,
4591 								    SCTP_RTT_FROM_DATA);
4592 								rto_ok = 0;
4593 							}
4594 							if (tp1->whoTo->rto_needed == 0) {
4595 								tp1->whoTo->rto_needed = 1;
4596 							}
4597 							tp1->do_rtt = 0;
4598 						}
4599 					}
4600 					/*
4601 					 * CMT: CUCv2 algorithm. From the
4602 					 * cumack'd TSNs, for each TSN being
4603 					 * acked for the first time, set the
4604 					 * following variables for the
4605 					 * corresp destination.
4606 					 * new_pseudo_cumack will trigger a
4607 					 * cwnd update.
4608 					 * find_(rtx_)pseudo_cumack will
4609 					 * trigger search for the next
4610 					 * expected (rtx-)pseudo-cumack.
4611 					 */
4612 					tp1->whoTo->new_pseudo_cumack = 1;
4613 					tp1->whoTo->find_pseudo_cumack = 1;
4614 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4615 
4616 
4617 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4618 						sctp_log_sack(asoc->last_acked_seq,
4619 						    cum_ack,
4620 						    tp1->rec.data.TSN_seq,
4621 						    0,
4622 						    0,
4623 						    SCTP_LOG_TSN_ACKED);
4624 					}
4625 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4626 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4627 					}
4628 				}
4629 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4630 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4631 #ifdef SCTP_AUDITING_ENABLED
4632 					sctp_audit_log(0xB3,
4633 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4634 #endif
4635 				}
4636 				if (tp1->rec.data.chunk_was_revoked) {
4637 					/* deflate the cwnd */
4638 					tp1->whoTo->cwnd -= tp1->book_size;
4639 					tp1->rec.data.chunk_was_revoked = 0;
4640 				}
4641 				tp1->sent = SCTP_DATAGRAM_ACKED;
4642 			}
4643 		} else {
4644 			break;
4645 		}
4646 	}
4647 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4648 	/* always set this up to cum-ack */
4649 	asoc->this_sack_highest_gap = last_tsn;
4650 
4651 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4652 
4653 		/*
4654 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4655 		 * to be greater than the cumack. Also reset saw_newack to 0
4656 		 * for all dests.
4657 		 */
4658 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4659 			net->saw_newack = 0;
4660 			net->this_sack_highest_newack = last_tsn;
4661 		}
4662 
4663 		/*
4664 		 * thisSackHighestGap will increase while handling NEW
4665 		 * segments; this_sack_highest_newack will increase while
4666 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4667 		 * used for CMT DAC algo. saw_newack will also change.
4668 		 */
4669 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4670 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4671 		    num_seg, num_nr_seg, &rto_ok)) {
4672 			wake_him++;
4673 		}
4674 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4675 			/*
4676 			 * validate the biggest_tsn_acked in the gap acks if
4677 			 * strict adherence is wanted.
4678 			 */
4679 			if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4680 				/*
4681 				 * peer is either confused or we are under
4682 				 * attack. We must abort.
4683 				 */
4684 				printf("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4685 				    biggest_tsn_acked,
4686 				    send_s);
4687 
4688 				goto hopeless_peer;
4689 			}
4690 		}
4691 	}
4692 	/*******************************************/
4693 	/* cancel ALL T3-send timer if accum moved */
4694 	/*******************************************/
4695 	if (asoc->sctp_cmt_on_off > 0) {
4696 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4697 			if (net->new_pseudo_cumack)
4698 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4699 				    stcb, net,
4700 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4701 
4702 		}
4703 	} else {
4704 		if (accum_moved) {
4705 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4706 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4707 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4708 			}
4709 		}
4710 	}
4711 	/********************************************/
4712 	/* drop the acked chunks from the sentqueue */
4713 	/********************************************/
4714 	asoc->last_acked_seq = cum_ack;
4715 
4716 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4717 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4718 			break;
4719 		}
4720 		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4721 			/* no more sent on list */
			SCTP_PRINTF("Warning, tp1->sent == %d and it's now acked?\n",
			    tp1->sent);
4724 		}
4725 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
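		/* Keep the count of outstanding PR-SCTP chunks in sync. */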
4726 		if (tp1->pr_sctp_on) {
4727 			if (asoc->pr_sctp_cnt != 0)
4728 				asoc->pr_sctp_cnt--;
4729 		}
4730 		asoc->sent_queue_cnt--;
4731 		if (tp1->data) {
4732 			/* sa_ignore NO_NULL_CHK */
4733 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4734 			sctp_m_freem(tp1->data);
4735 			tp1->data = NULL;
4736 			if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4737 				asoc->sent_queue_cnt_removeable--;
4738 			}
4739 		}
4740 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4741 			sctp_log_sack(asoc->last_acked_seq,
4742 			    cum_ack,
4743 			    tp1->rec.data.TSN_seq,
4744 			    0,
4745 			    0,
4746 			    SCTP_LOG_FREE_SENT);
4747 		}
4748 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4749 		wake_him++;
4750 	}
4751 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4752 #ifdef INVARIANTS
		panic("Warning: flight size is positive and should be 0");
#else
		SCTP_PRINTF("Warning: flight size is %d but should be 0\n",
		    asoc->total_flight);
4757 #endif
4758 		asoc->total_flight = 0;
4759 	}
4760 	/* sa_ignore NO_NULL_CHK */
4761 	if ((wake_him) && (stcb->sctp_socket)) {
4762 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4763 		struct socket *so;
4764 
4765 #endif
4766 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4767 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4768 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4769 		}
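		/*
		 * Lock ordering: the socket lock must be taken before the
		 * TCB lock, so hold a reference on the association, drop
		 * the TCB lock, take the socket lock, and then re-acquire
		 * the TCB lock.
		 */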
4770 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4771 		so = SCTP_INP_SO(stcb->sctp_ep);
4772 		atomic_add_int(&stcb->asoc.refcnt, 1);
4773 		SCTP_TCB_UNLOCK(stcb);
4774 		SCTP_SOCKET_LOCK(so, 1);
4775 		SCTP_TCB_LOCK(stcb);
4776 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4777 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4778 			/* assoc was freed while we were unlocked */
4779 			SCTP_SOCKET_UNLOCK(so, 1);
4780 			return;
4781 		}
4782 #endif
4783 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4784 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4785 		SCTP_SOCKET_UNLOCK(so, 1);
4786 #endif
4787 	} else {
4788 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4789 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4790 		}
4791 	}
4792 
4793 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4794 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4795 			/* Setup so we will exit RFC2582 fast recovery */
4796 			will_exit_fast_recovery = 1;
4797 		}
4798 	}
	/*
	 * Check for revoked fragments:
	 *
	 * If the previous SACK had no frags, then nothing can have been
	 * revoked. If the previous SACK had frags and this one also has
	 * frags (num_seg > 0), call sctp_check_for_revoked() to tell if
	 * the peer revoked some of them. If the previous SACK had frags
	 * and this one has NONE, the peer revoked all ACKED fragments.
	 */
4808 
4809 	if (num_seg) {
4810 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4811 		asoc->saw_sack_with_frags = 1;
4812 	} else if (asoc->saw_sack_with_frags) {
4813 		int cnt_revoked = 0;
4814 
		/* Peer revoked all datagrams previously marked or acked */
4816 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4817 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4818 				tp1->sent = SCTP_DATAGRAM_SENT;
4819 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4820 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4821 					    tp1->whoTo->flight_size,
4822 					    tp1->book_size,
4823 					    (uintptr_t) tp1->whoTo,
4824 					    tp1->rec.data.TSN_seq);
4825 				}
4826 				sctp_flight_size_increase(tp1);
4827 				sctp_total_flight_increase(stcb, tp1);
4828 				tp1->rec.data.chunk_was_revoked = 1;
4829 				/*
4830 				 * To ensure that this increase in
4831 				 * flightsize, which is artificial, does not
4832 				 * throttle the sender, we also increase the
4833 				 * cwnd artificially.
4834 				 */
4835 				tp1->whoTo->cwnd += tp1->book_size;
4836 				cnt_revoked++;
4837 			}
4838 		}
4839 		if (cnt_revoked) {
4840 			reneged_all = 1;
4841 		}
4842 		asoc->saw_sack_with_frags = 0;
4843 	}
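	/* Remember whether this SACK carried NR gap reports, for the next one. */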
4844 	if (num_nr_seg > 0)
4845 		asoc->saw_sack_with_nr_frags = 1;
4846 	else
4847 		asoc->saw_sack_with_nr_frags = 0;
4848 
4849 	/* JRS - Use the congestion control given in the CC module */
4850 	if (ecne_seen == 0) {
4851 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4852 			if (net->net_ack2 > 0) {
4853 				/*
				 * Karn's rule applies to clearing the error
				 * count; this is optional.
4856 				 */
4857 				net->error_count = 0;
4858 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4859 					/* addr came good */
4860 					net->dest_state |= SCTP_ADDR_REACHABLE;
4861 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4862 					    SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED);
4863 				}
4864 				if (net == stcb->asoc.primary_destination) {
4865 					if (stcb->asoc.alternate) {
4866 						/*
4867 						 * release the alternate,
4868 						 * primary is good
4869 						 */
4870 						sctp_free_remote_addr(stcb->asoc.alternate);
4871 						stcb->asoc.alternate = NULL;
4872 					}
4873 				}
4874 				if (net->dest_state & SCTP_ADDR_PF) {
4875 					net->dest_state &= ~SCTP_ADDR_PF;
4876 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4877 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4878 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4879 					/* Done with this net */
4880 					net->net_ack = 0;
4881 				}
4882 				/* restore any doubled timers */
4883 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4884 				if (net->RTO < stcb->asoc.minrto) {
4885 					net->RTO = stcb->asoc.minrto;
4886 				}
4887 				if (net->RTO > stcb->asoc.maxrto) {
4888 					net->RTO = stcb->asoc.maxrto;
4889 				}
4890 			}
4891 		}
4892 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4893 	}
4894 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4895 		/* nothing left in-flight */
4896 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4897 			/* stop all timers */
4898 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4899 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4900 			net->flight_size = 0;
4901 			net->partial_bytes_acked = 0;
4902 		}
4903 		asoc->total_flight = 0;
4904 		asoc->total_flight_count = 0;
4905 	}
4906 	/**********************************/
4907 	/* Now what about shutdown issues */
4908 	/**********************************/
4909 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4910 		/* nothing left on sendqueue.. consider done */
4911 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4912 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4913 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4914 		}
4915 		asoc->peers_rwnd = a_rwnd;
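		/*
		 * A peer window smaller than the SWS threshold is treated
		 * as closed to avoid sender-side silly window syndrome.
		 */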
4916 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4917 			/* SWS sender side engages */
4918 			asoc->peers_rwnd = 0;
4919 		}
4920 		/* clean up */
4921 		if ((asoc->stream_queue_cnt == 1) &&
4922 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4923 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4924 		    (asoc->locked_on_sending)
4925 		    ) {
4926 			struct sctp_stream_queue_pending *sp;
4927 
4928 			/*
			 * We may be in a state where everything got across
			 * but we cannot write more due to a shutdown. We
			 * abort since the user did not indicate EOR in this
			 * case.
4932 			 */
4933 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4934 			    sctp_streamhead);
4935 			if ((sp) && (sp->length == 0)) {
4936 				asoc->locked_on_sending = NULL;
4937 				if (sp->msg_is_complete) {
4938 					asoc->stream_queue_cnt--;
4939 				} else {
4940 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4941 					asoc->stream_queue_cnt--;
4942 				}
4943 			}
4944 		}
4945 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4946 		    (asoc->stream_queue_cnt == 0)) {
4947 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4948 				/* Need to abort here */
4949 				struct mbuf *oper;
4950 
4951 		abort_out_now:
4952 				*abort_now = 1;
4953 				/* XXX */
4954 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4955 				    0, M_DONTWAIT, 1, MT_DATA);
4956 				if (oper) {
4957 					struct sctp_paramhdr *ph;
4958 					uint32_t *ippp;
4959 
4960 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4961 					    sizeof(uint32_t);
4962 					ph = mtod(oper, struct sctp_paramhdr *);
4963 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4964 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4965 					ippp = (uint32_t *) (ph + 1);
4966 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4967 				}
4968 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4969 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4970 				return;
4971 			} else {
4972 				struct sctp_nets *netp;
4973 
4974 				if (asoc->alternate) {
4975 					netp = asoc->alternate;
4976 				} else {
4977 					netp = asoc->primary_destination;
4978 				}
4979 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4980 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4981 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4982 				}
4983 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4984 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4985 				sctp_stop_timers_for_shutdown(stcb);
4986 				sctp_send_shutdown(stcb, netp);
4987 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4988 				    stcb->sctp_ep, stcb, netp);
4989 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4990 				    stcb->sctp_ep, stcb, netp);
4991 			}
4992 			return;
4993 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4994 		    (asoc->stream_queue_cnt == 0)) {
4995 			struct sctp_nets *netp;
4996 
4997 			if (asoc->alternate) {
4998 				netp = asoc->alternate;
4999 			} else {
5000 				netp = asoc->primary_destination;
5001 			}
5002 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5003 				goto abort_out_now;
5004 			}
5005 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5006 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5007 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5008 			sctp_send_shutdown_ack(stcb, netp);
5009 			sctp_stop_timers_for_shutdown(stcb);
5010 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5011 			    stcb->sctp_ep, stcb, netp);
5012 			return;
5013 		}
5014 	}
5015 	/*
5016 	 * Now here we are going to recycle net_ack for a different use...
5017 	 * HEADS UP.
5018 	 */
5019 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5020 		net->net_ack = 0;
5021 	}
5022 
5023 	/*
5024 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5025 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5026 	 * automatically ensure that.
5027 	 */
5028 	if ((asoc->sctp_cmt_on_off > 0) &&
5029 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5030 	    (cmt_dac_flag == 0)) {
5031 		this_sack_lowest_newack = cum_ack;
5032 	}
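	/* Walk the gap reports and strike the chunks they show as missed. */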
5033 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5034 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5035 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5036 	}
5037 	/* JRS - Use the congestion control given in the CC module */
5038 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5039 
5040 	/* Now are we exiting loss recovery ? */
5041 	if (will_exit_fast_recovery) {
5042 		/* Ok, we must exit fast recovery */
5043 		asoc->fast_retran_loss_recovery = 0;
5044 	}
5045 	if ((asoc->sat_t3_loss_recovery) &&
5046 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5047 		/* end satellite t3 loss recovery */
5048 		asoc->sat_t3_loss_recovery = 0;
5049 	}
5050 	/*
5051 	 * CMT Fast recovery
5052 	 */
5053 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5054 		if (net->will_exit_fast_recovery) {
5055 			/* Ok, we must exit fast recovery */
5056 			net->fast_retran_loss_recovery = 0;
5057 		}
5058 	}
5059 
5060 	/* Adjust and set the new rwnd value */
5061 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5062 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5063 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5064 	}
5065 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5066 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5067 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5068 		/* SWS sender side engages */
5069 		asoc->peers_rwnd = 0;
5070 	}
5071 	if (asoc->peers_rwnd > old_rwnd) {
5072 		win_probe_recovery = 1;
5073 	}
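	/*
	 * The peer's advertised window grew, so a chunk that was sent as
	 * a window probe can be restored to normal data handling below.
	 */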
5074 	/*
5075 	 * Now we must setup so we have a timer up for anyone with
5076 	 * outstanding data.
5077 	 */
5078 	done_once = 0;
5079 again:
5080 	j = 0;
5081 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5082 		if (win_probe_recovery && (net->window_probe)) {
5083 			win_probe_recovered = 1;
5084 			/*-
			 * Find the first chunk that was used for a
			 * window probe and clear the event. Put it
			 * back into the send queue as if it had
			 * not been sent.
5089 			 */
5090 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5091 				if (tp1->window_probe) {
5092 					sctp_window_probe_recovery(stcb, asoc, tp1);
5093 					break;
5094 				}
5095 			}
5096 		}
5097 		if (net->flight_size) {
5098 			j++;
5099 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5100 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5101 				    stcb->sctp_ep, stcb, net);
5102 			}
5103 			if (net->window_probe) {
5104 				net->window_probe = 0;
5105 			}
5106 		} else {
5107 			if (net->window_probe) {
5108 				/*
				 * For window probes we must ensure that a
				 * timer is still running on the destination.
5111 				 */
5112 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5113 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5114 					    stcb->sctp_ep, stcb, net);
5115 
5116 				}
5117 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5118 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5119 				    stcb, net,
5120 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5121 			}
5122 		}
5123 	}
5124 	if ((j == 0) &&
5125 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5126 	    (asoc->sent_queue_retran_cnt == 0) &&
5127 	    (win_probe_recovered == 0) &&
5128 	    (done_once == 0)) {
5129 		/*
		 * Huh, this should not happen unless all packets are
		 * PR-SCTP and marked to be skipped, of course.
5132 		 */
5133 		if (sctp_fs_audit(asoc)) {
5134 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5135 				net->flight_size = 0;
5136 			}
5137 			asoc->total_flight = 0;
5138 			asoc->total_flight_count = 0;
5139 			asoc->sent_queue_retran_cnt = 0;
5140 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5141 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5142 					sctp_flight_size_increase(tp1);
5143 					sctp_total_flight_increase(stcb, tp1);
5144 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5145 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5146 				}
5147 			}
5148 		}
5149 		done_once = 1;
5150 		goto again;
5151 	}
5152 	/*********************************************/
5153 	/* Here we perform PR-SCTP procedures        */
5154 	/* (section 4.2)                             */
5155 	/*********************************************/
5156 	/* C1. update advancedPeerAckPoint */
5157 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5158 		asoc->advanced_peer_ack_point = cum_ack;
5159 	}
5160 	/* C2. try to further move advancedPeerAckPoint ahead */
5161 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5162 		struct sctp_tmit_chunk *lchk;
5163 		uint32_t old_adv_peer_ack_point;
5164 
5165 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5166 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5167 		/* C3. See if we need to send a Fwd-TSN */
5168 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5169 			/*
5170 			 * ISSUE with ECN, see FWD-TSN processing.
5171 			 */
5172 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5173 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5174 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5175 				    old_adv_peer_ack_point);
5176 			}
5177 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5178 				send_forward_tsn(stcb, asoc);
5179 			} else if (lchk) {
5180 				/* try to FR fwd-tsn's that get lost too */
5181 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5182 					send_forward_tsn(stcb, asoc);
5183 				}
5184 			}
5185 		}
5186 		if (lchk) {
5187 			/* Assure a timer is up */
5188 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5189 			    stcb->sctp_ep, stcb, lchk->whoTo);
5190 		}
5191 	}
5192 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5193 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5194 		    a_rwnd,
5195 		    stcb->asoc.peers_rwnd,
5196 		    stcb->asoc.total_flight,
5197 		    stcb->asoc.total_output_queue_size);
5198 	}
5199 }
5200 
5201 void
5202 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5203 {
5204 	/* Copy cum-ack */
5205 	uint32_t cum_ack, a_rwnd;
5206 
5207 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5208 	/* Arrange so a_rwnd does NOT change */
5209 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
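	/*
	 * peers_rwnd has total_flight subtracted out, so adding flight
	 * back reconstructs (roughly, modulo per-chunk overhead) the
	 * last advertised window for the express handler to re-use.
	 */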
5210 
5211 	/* Now call the express sack handling */
5212 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5213 }
5214 
5215 static void
5216 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5217     struct sctp_stream_in *strmin)
5218 {
5219 	struct sctp_queued_to_read *ctl, *nctl;
5220 	struct sctp_association *asoc;
5221 	uint16_t tt;
5222 
5223 	asoc = &stcb->asoc;
5224 	tt = strmin->last_sequence_delivered;
5225 	/*
	 * First deliver anything prior to and including the stream
	 * sequence number that came in.
5228 	 */
5229 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5230 		if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
5231 			/* this is deliverable now */
5232 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5233 			/* subtract pending on streams */
5234 			asoc->size_on_all_streams -= ctl->length;
5235 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5236 			/* deliver it to at least the delivery-q */
5237 			if (stcb->sctp_socket) {
5238 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5239 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5240 				    ctl,
5241 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5242 			}
5243 		} else {
5244 			/* no more delivery now. */
5245 			break;
5246 		}
5247 	}
5248 	/*
	 * Now we must deliver, in the normal way, anything in the queue
	 * that has become ready.
5251 	 */
5252 	tt = strmin->last_sequence_delivered + 1;
5253 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5254 		if (tt == ctl->sinfo_ssn) {
5255 			/* this is deliverable now */
5256 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5257 			/* subtract pending on streams */
5258 			asoc->size_on_all_streams -= ctl->length;
5259 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5260 			/* deliver it to at least the delivery-q */
5261 			strmin->last_sequence_delivered = ctl->sinfo_ssn;
5262 			if (stcb->sctp_socket) {
5263 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5264 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5265 				    ctl,
5266 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5267 
5268 			}
5269 			tt = strmin->last_sequence_delivered + 1;
5270 		} else {
5271 			break;
5272 		}
5273 	}
5274 }
5275 
5276 static void
5277 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5278     struct sctp_association *asoc,
5279     uint16_t stream, uint16_t seq)
5280 {
5281 	struct sctp_tmit_chunk *chk, *nchk;
5282 
5283 	/* For each one on here see if we need to toss it */
5284 	/*
	 * For now, large messages held on the reasmqueue that are complete
	 * will be tossed too. We could, in theory, do more work: spin
	 * through, stop after dumping one msg (aka seeing the start of a
	 * new msg at the head), and call the delivery function to see if
	 * it can be delivered. But for now we just dump everything on the
	 * queue.
5291 	 */
5292 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5293 		/*
		 * Do not toss it if it is on a different stream or marked
		 * for unordered delivery, in which case the stream sequence
		 * number has no meaning.
5297 		 */
5298 		if ((chk->rec.data.stream_number != stream) ||
5299 		    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5300 			continue;
5301 		}
5302 		if (chk->rec.data.stream_seq == seq) {
5303 			/* It needs to be tossed */
5304 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5305 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5306 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5307 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5308 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5309 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5310 			}
5311 			asoc->size_on_reasm_queue -= chk->send_size;
5312 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5313 
5314 			/* Clear up any stream problem */
5315 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5316 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5317 				/*
				 * We must bump forward this stream's
				 * sequence number if the chunk being
				 * skipped is not unordered. There is
				 * a chance that if the peer does not
				 * include the last fragment in its FWD-TSN
				 * we WILL have a problem here, since we
				 * would have a partial chunk in queue that
				 * may not be deliverable. Also, if a partial
				 * delivery API has started, the user may get
				 * a partial chunk with the next read
				 * returning a new chunk... really ugly, but
				 * I see no way around it! Maybe a notify??
5330 				 */
5331 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5332 			}
5333 			if (chk->data) {
5334 				sctp_m_freem(chk->data);
5335 				chk->data = NULL;
5336 			}
5337 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5338 		} else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
5339 			/*
			 * If the stream_seq is greater than the one being
			 * purged, we are done.
5342 			 */
5343 			break;
5344 		}
5345 	}
5346 }
5347 
5348 
5349 void
5350 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5351     struct sctp_forward_tsn_chunk *fwd,
5352     int *abort_flag, struct mbuf *m, int offset)
5353 {
5354 	/* The pr-sctp fwd tsn */
5355 	/*
	 * Here we will perform all the data receiver side steps for
	 * processing FwdTSN, as required by the PR-SCTP draft:
	 *
	 * Assume we get FwdTSN(x):
	 *
	 * 1) update local cumTSN to x
	 * 2) try to further advance cumTSN to x + others we have
	 * 3) examine and update re-ordering queue on pr-in-streams
	 * 4) clean up re-assembly queue
	 * 5) Send a sack to report where we are.
5365 	 */
5366 	struct sctp_association *asoc;
5367 	uint32_t new_cum_tsn, gap;
5368 	unsigned int i, fwd_sz, m_size;
5369 	uint32_t str_seq;
5370 	struct sctp_stream_in *strm;
5371 	struct sctp_tmit_chunk *chk, *nchk;
5372 	struct sctp_queued_to_read *ctl, *sv;
5373 
5374 	asoc = &stcb->asoc;
5375 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5376 		SCTPDBG(SCTP_DEBUG_INDATA1,
		    "Bad size: too small/big fwd-tsn\n");
5378 		return;
5379 	}
5380 	m_size = (stcb->asoc.mapping_array_size << 3);
5381 	/*************************************************************/
5382 	/* 1. Here we update local cumTSN and shift the bitmap array */
5383 	/*************************************************************/
5384 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5385 
5386 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5387 		/* Already got there ... */
5388 		return;
5389 	}
5390 	/*
	 * Now we know the new TSN is more advanced; let's find the actual
	 * gap.
5393 	 */
5394 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5395 	asoc->cumulative_tsn = new_cum_tsn;
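	/*
	 * If the new cumulative TSN lies beyond the mapping window, the
	 * whole map can simply be cleared; otherwise mark each TSN up to
	 * the gap as received.
	 */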
5396 	if (gap >= m_size) {
5397 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5398 			struct mbuf *oper;
5399 
5400 			/*
			 * Out of range (beyond the number of single-byte
			 * chunks the rwnd we give out allows); this must
			 * be an attacker.
5403 			 */
5404 			*abort_flag = 1;
5405 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5406 			    0, M_DONTWAIT, 1, MT_DATA);
5407 			if (oper) {
5408 				struct sctp_paramhdr *ph;
5409 				uint32_t *ippp;
5410 
5411 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5412 				    (sizeof(uint32_t) * 3);
5413 				ph = mtod(oper, struct sctp_paramhdr *);
5414 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5415 				ph->param_length = htons(SCTP_BUF_LEN(oper));
5416 				ippp = (uint32_t *) (ph + 1);
5417 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5418 				ippp++;
5419 				*ippp = asoc->highest_tsn_inside_map;
5420 				ippp++;
5421 				*ippp = new_cum_tsn;
5422 			}
5423 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5424 			sctp_abort_an_association(stcb->sctp_ep, stcb,
5425 			    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
5426 			return;
5427 		}
5428 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5429 
5430 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5431 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5432 		asoc->highest_tsn_inside_map = new_cum_tsn;
5433 
5434 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5435 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5436 
5437 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5438 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5439 		}
5440 	} else {
5441 		SCTP_TCB_LOCK_ASSERT(stcb);
5442 		for (i = 0; i <= gap; i++) {
5443 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5444 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5445 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5446 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5447 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5448 				}
5449 			}
5450 		}
5451 	}
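	/* Either way, every TSN up to new_cum_tsn is now accounted for. */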
5452 	/*************************************************************/
5453 	/* 2. Clear up re-assembly queue                             */
5454 	/*************************************************************/
5455 	/*
	 * First, service the queue if the PD-API is up, just in case we
	 * can progress it forward.
5458 	 */
5459 	if (asoc->fragmented_delivery_inprogress) {
5460 		sctp_service_reassembly(stcb, asoc);
5461 	}
5462 	/* For each one on here see if we need to toss it */
5463 	/*
	 * For now, large messages held on the reasmqueue that are complete
	 * will be tossed too. We could, in theory, do more work: spin
	 * through, stop after dumping one msg (aka seeing the start of a
	 * new msg at the head), and call the delivery function to see if
	 * it can be delivered. But for now we just dump everything on the
	 * queue.
5470 	 */
5471 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5472 		if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5473 			/* It needs to be tossed */
5474 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5475 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5476 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5477 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5478 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5479 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5480 			}
5481 			asoc->size_on_reasm_queue -= chk->send_size;
5482 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5483 
5484 			/* Clear up any stream problem */
5485 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5486 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5487 				/*
				 * We must bump forward this stream's
				 * sequence number if the chunk being
				 * skipped is not unordered. There is
				 * a chance that if the peer does not
				 * include the last fragment in its FWD-TSN
				 * we WILL have a problem here, since we
				 * would have a partial chunk in queue that
				 * may not be deliverable. Also, if a partial
				 * delivery API has started, the user may get
				 * a partial chunk with the next read
				 * returning a new chunk... really ugly, but
				 * I see no way around it! Maybe a notify??
5500 				 */
5501 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5502 			}
5503 			if (chk->data) {
5504 				sctp_m_freem(chk->data);
5505 				chk->data = NULL;
5506 			}
5507 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5508 		} else {
5509 			/*
5510 			 * Ok we have gone beyond the end of the fwd-tsn's
5511 			 * mark.
5512 			 */
5513 			break;
5514 		}
5515 	}
5516 	/*******************************************************/
5517 	/* 3. Update the PR-stream re-ordering queues and fix  */
	/*    delivery issues as needed.                        */
5519 	/*******************************************************/
5520 	fwd_sz -= sizeof(*fwd);
5521 	if (m && fwd_sz) {
5522 		/* New method. */
5523 		unsigned int num_str;
5524 		struct sctp_strseq *stseq, strseqbuf;
5525 
5526 		offset += sizeof(*fwd);
5527 
5528 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5529 		num_str = fwd_sz / sizeof(struct sctp_strseq);
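		/*
		 * Each strseq entry gives a stream and the stream sequence
		 * number being skipped.
		 */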
5530 		for (i = 0; i < num_str; i++) {
5531 			uint16_t st;
5532 
5533 			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5534 			    sizeof(struct sctp_strseq),
5535 			    (uint8_t *) & strseqbuf);
5536 			offset += sizeof(struct sctp_strseq);
5537 			if (stseq == NULL) {
5538 				break;
5539 			}
5540 			/* Convert */
5541 			st = ntohs(stseq->stream);
5542 			stseq->stream = st;
5543 			st = ntohs(stseq->sequence);
5544 			stseq->sequence = st;
5545 
5546 			/* now process */
5547 
5548 			/*
			 * OK, we now look for the stream/seq on the read
			 * queue where it is not all delivered. If we find
			 * it, we transmute the read entry into a PDI_ABORTED.
5552 			 */
5553 			if (stseq->stream >= asoc->streamincnt) {
5554 				/* screwed up streams, stop!  */
5555 				break;
5556 			}
5557 			if ((asoc->str_of_pdapi == stseq->stream) &&
5558 			    (asoc->ssn_of_pdapi == stseq->sequence)) {
5559 				/*
				 * If this is the one we are currently
				 * partially delivering, then we no longer are.
5562 				 * Note this will change with the reassembly
5563 				 * re-write.
5564 				 */
5565 				asoc->fragmented_delivery_inprogress = 0;
5566 			}
5567 			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5568 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5569 				if ((ctl->sinfo_stream == stseq->stream) &&
5570 				    (ctl->sinfo_ssn == stseq->sequence)) {
5571 					str_seq = (stseq->stream << 16) | stseq->sequence;
5572 					ctl->end_added = 1;
5573 					ctl->pdapi_aborted = 1;
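					/*
					 * Temporarily make this entry the
					 * current PD-API control so the
					 * notification code references it;
					 * the old value is restored below.
					 */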
5574 					sv = stcb->asoc.control_pdapi;
5575 					stcb->asoc.control_pdapi = ctl;
5576 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5577 					    stcb,
5578 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5579 					    (void *)&str_seq,
5580 					    SCTP_SO_NOT_LOCKED);
5581 					stcb->asoc.control_pdapi = sv;
5582 					break;
5583 				} else if ((ctl->sinfo_stream == stseq->stream) &&
5584 				    SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5585 					/* We are past our victim SSN */
5586 					break;
5587 				}
5588 			}
5589 			strm = &asoc->strmin[stseq->stream];
5590 			if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5591 				/* Update the sequence number */
5592 				strm->last_sequence_delivered = stseq->sequence;
5593 			}
5594 			/* now kick the stream the new way */
5595 			/* sa_ignore NO_NULL_CHK */
5596 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5597 		}
5598 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5599 	}
5600 	/*
	 * Now slide things forward.
5602 	 */
5603 	sctp_slide_mapping_arrays(stcb);
5604 
5605 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* Now let's kick out and check for more fragmented delivery. */
5607 		/* sa_ignore NO_NULL_CHK */
5608 		sctp_deliver_reasm_check(stcb, &stcb->asoc);
5609 	}
5610 }
5611