xref: /freebsd/sys/netinet/sctp_indata.c (revision 3ef51c5fb9163f2aafb1c14729e06a8bf0c4d113)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $	 */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctp_header.h>
43 #include <netinet/sctputil.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_input.h>
46 #include <netinet/sctp_indata.h>
47 #include <netinet/sctp_uio.h>
48 #include <netinet/sctp_timer.h>
49 
50 
51 /*
52  * NOTES: On the outbound side of things I need to check the sack timer to
53  * see if I should generate a sack into the chunk queue (if I have data to
54  * send that is and will be sending it .. for bundling.
55  *
56  * The callback in sctp_usrreq.c will get called when the socket is read from.
57  * This will cause sctp_service_queues() to get called on the top entry in
58  * the list.
59  */
60 
61 void
62 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
63 {
64 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
65 }
66 
67 /* Calculate what the rwnd would be */
68 uint32_t
69 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
70 {
71 	uint32_t calc = 0;
72 
73 	/*
74 	 * This is really set wrong with respect to a 1-2-m socket. Since
75 	 * the sb_cc is the count that everyone as put up. When we re-write
76 	 * sctp_soreceive then we will fix this so that ONLY this
77 	 * associations data is taken into account.
78 	 */
79 	if (stcb->sctp_socket == NULL)
80 		return (calc);
81 
82 	if (stcb->asoc.sb_cc == 0 &&
83 	    asoc->size_on_reasm_queue == 0 &&
84 	    asoc->size_on_all_streams == 0) {
85 		/* Full rwnd granted */
86 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
87 		return (calc);
88 	}
89 	/* get actual space */
90 	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
91 
92 	/*
93 	 * take out what has NOT been put on socket queue and we yet hold
94 	 * for putting up.
95 	 */
96 	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
97 	    asoc->cnt_on_reasm_queue * MSIZE));
98 	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
99 	    asoc->cnt_on_all_streams * MSIZE));
100 
101 	if (calc == 0) {
102 		/* out of space */
103 		return (calc);
104 	}
105 	/* what is the overhead of all these rwnd's */
106 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
107 	/*
108 	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
109 	 * even it is 0. SWS engaged
110 	 */
111 	if (calc < stcb->asoc.my_rwnd_control_len) {
112 		calc = 1;
113 	}
114 	return (calc);
115 }
116 
117 
118 
119 /*
120  * Build out our readq entry based on the incoming packet.
121  */
122 struct sctp_queued_to_read *
123 sctp_build_readq_entry(struct sctp_tcb *stcb,
124     struct sctp_nets *net,
125     uint32_t tsn, uint32_t ppid,
126     uint32_t context, uint16_t stream_no,
127     uint16_t stream_seq, uint8_t flags,
128     struct mbuf *dm)
129 {
130 	struct sctp_queued_to_read *read_queue_e = NULL;
131 
132 	sctp_alloc_a_readq(stcb, read_queue_e);
133 	if (read_queue_e == NULL) {
134 		goto failed_build;
135 	}
136 	read_queue_e->sinfo_stream = stream_no;
137 	read_queue_e->sinfo_ssn = stream_seq;
138 	read_queue_e->sinfo_flags = (flags << 8);
139 	read_queue_e->sinfo_ppid = ppid;
140 	read_queue_e->sinfo_context = context;
141 	read_queue_e->sinfo_timetolive = 0;
142 	read_queue_e->sinfo_tsn = tsn;
143 	read_queue_e->sinfo_cumtsn = tsn;
144 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
145 	read_queue_e->whoFrom = net;
146 	read_queue_e->length = 0;
147 	atomic_add_int(&net->ref_count, 1);
148 	read_queue_e->data = dm;
149 	read_queue_e->spec_flags = 0;
150 	read_queue_e->tail_mbuf = NULL;
151 	read_queue_e->aux_data = NULL;
152 	read_queue_e->stcb = stcb;
153 	read_queue_e->port_from = stcb->rport;
154 	read_queue_e->do_not_ref_stcb = 0;
155 	read_queue_e->end_added = 0;
156 	read_queue_e->some_taken = 0;
157 	read_queue_e->pdapi_aborted = 0;
158 failed_build:
159 	return (read_queue_e);
160 }
161 
162 
163 /*
164  * Build out our readq entry based on the incoming packet.
165  */
166 static struct sctp_queued_to_read *
167 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
168     struct sctp_tmit_chunk *chk)
169 {
170 	struct sctp_queued_to_read *read_queue_e = NULL;
171 
172 	sctp_alloc_a_readq(stcb, read_queue_e);
173 	if (read_queue_e == NULL) {
174 		goto failed_build;
175 	}
176 	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
177 	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
178 	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
179 	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
180 	read_queue_e->sinfo_context = stcb->asoc.context;
181 	read_queue_e->sinfo_timetolive = 0;
182 	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
183 	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
184 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
185 	read_queue_e->whoFrom = chk->whoTo;
186 	read_queue_e->aux_data = NULL;
187 	read_queue_e->length = 0;
188 	atomic_add_int(&chk->whoTo->ref_count, 1);
189 	read_queue_e->data = chk->data;
190 	read_queue_e->tail_mbuf = NULL;
191 	read_queue_e->stcb = stcb;
192 	read_queue_e->port_from = stcb->rport;
193 	read_queue_e->spec_flags = 0;
194 	read_queue_e->do_not_ref_stcb = 0;
195 	read_queue_e->end_added = 0;
196 	read_queue_e->some_taken = 0;
197 	read_queue_e->pdapi_aborted = 0;
198 failed_build:
199 	return (read_queue_e);
200 }
201 
202 
203 struct mbuf *
204 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
205 {
206 	struct sctp_extrcvinfo *seinfo;
207 	struct sctp_sndrcvinfo *outinfo;
208 	struct sctp_rcvinfo *rcvinfo;
209 	struct sctp_nxtinfo *nxtinfo;
210 	struct cmsghdr *cmh;
211 	struct mbuf *ret;
212 	int len;
213 	int use_extended;
214 	int provide_nxt;
215 
216 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
217 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
218 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
219 		/* user does not want any ancillary data */
220 		return (NULL);
221 	}
222 	len = 0;
223 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
224 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
225 	}
226 	seinfo = (struct sctp_extrcvinfo *)sinfo;
227 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
228 	    (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
229 		provide_nxt = 1;
230 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
231 	} else {
232 		provide_nxt = 0;
233 	}
234 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
235 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
236 			use_extended = 1;
237 			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
238 		} else {
239 			use_extended = 0;
240 			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
241 		}
242 	} else {
243 		use_extended = 0;
244 	}
245 
246 	ret = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
247 	if (ret == NULL) {
248 		/* No space */
249 		return (ret);
250 	}
251 	SCTP_BUF_LEN(ret) = 0;
252 
253 	/* We need a CMSG header followed by the struct */
254 	cmh = mtod(ret, struct cmsghdr *);
255 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
256 		cmh->cmsg_level = IPPROTO_SCTP;
257 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
258 		cmh->cmsg_type = SCTP_RCVINFO;
259 		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
260 		rcvinfo->rcv_sid = sinfo->sinfo_stream;
261 		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
262 		rcvinfo->rcv_flags = sinfo->sinfo_flags;
263 		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
264 		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
265 		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
266 		rcvinfo->rcv_context = sinfo->sinfo_context;
267 		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
268 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
269 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
270 	}
271 	if (provide_nxt) {
272 		cmh->cmsg_level = IPPROTO_SCTP;
273 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
274 		cmh->cmsg_type = SCTP_NXTINFO;
275 		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
276 		nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
277 		nxtinfo->nxt_flags = 0;
278 		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
279 			nxtinfo->nxt_flags |= SCTP_UNORDERED;
280 		}
281 		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
282 			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
283 		}
284 		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
285 			nxtinfo->nxt_flags |= SCTP_COMPLETE;
286 		}
287 		nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
288 		nxtinfo->nxt_length = seinfo->sreinfo_next_length;
289 		nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
290 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
291 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
292 	}
293 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
294 		cmh->cmsg_level = IPPROTO_SCTP;
295 		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
296 		if (use_extended) {
297 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
298 			cmh->cmsg_type = SCTP_EXTRCV;
299 			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
300 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
301 		} else {
302 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
303 			cmh->cmsg_type = SCTP_SNDRCV;
304 			*outinfo = *sinfo;
305 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
306 		}
307 	}
308 	return (ret);
309 }
310 
311 
312 static void
313 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
314 {
315 	uint32_t gap, i, cumackp1;
316 	int fnd = 0;
317 
318 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
319 		return;
320 	}
321 	cumackp1 = asoc->cumulative_tsn + 1;
322 	if (SCTP_TSN_GT(cumackp1, tsn)) {
323 		/*
324 		 * this tsn is behind the cum ack and thus we don't need to
325 		 * worry about it being moved from one to the other.
326 		 */
327 		return;
328 	}
329 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
330 	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
331 		printf("gap:%x tsn:%x\n", gap, tsn);
332 		sctp_print_mapping_array(asoc);
333 #ifdef INVARIANTS
334 		panic("Things are really messed up now!!");
335 #endif
336 	}
337 	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
338 	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
339 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
340 		asoc->highest_tsn_inside_nr_map = tsn;
341 	}
342 	if (tsn == asoc->highest_tsn_inside_map) {
343 		/* We must back down to see what the new highest is */
344 		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
345 			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
346 			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
347 				asoc->highest_tsn_inside_map = i;
348 				fnd = 1;
349 				break;
350 			}
351 		}
352 		if (!fnd) {
353 			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
354 		}
355 	}
356 }
357 
358 
359 /*
360  * We are delivering currently from the reassembly queue. We must continue to
361  * deliver until we either: 1) run out of space. 2) run out of sequential
362  * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
363  */
364 static void
365 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
366 {
367 	struct sctp_tmit_chunk *chk, *nchk;
368 	uint16_t nxt_todel;
369 	uint16_t stream_no;
370 	int end = 0;
371 	int cntDel;
372 	struct sctp_queued_to_read *control, *ctl, *nctl;
373 
374 	if (stcb == NULL)
375 		return;
376 
377 	cntDel = stream_no = 0;
378 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
379 	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
380 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
381 		/* socket above is long gone or going.. */
382 abandon:
383 		asoc->fragmented_delivery_inprogress = 0;
384 		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
385 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
386 			asoc->size_on_reasm_queue -= chk->send_size;
387 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
388 			/*
389 			 * Lose the data pointer, since its in the socket
390 			 * buffer
391 			 */
392 			if (chk->data) {
393 				sctp_m_freem(chk->data);
394 				chk->data = NULL;
395 			}
396 			/* Now free the address and data */
397 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
398 			/* sa_ignore FREED_MEMORY */
399 		}
400 		return;
401 	}
402 	SCTP_TCB_LOCK_ASSERT(stcb);
403 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
404 		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
405 			/* Can't deliver more :< */
406 			return;
407 		}
408 		stream_no = chk->rec.data.stream_number;
409 		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
410 		if (nxt_todel != chk->rec.data.stream_seq &&
411 		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
412 			/*
413 			 * Not the next sequence to deliver in its stream OR
414 			 * unordered
415 			 */
416 			return;
417 		}
418 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
419 
420 			control = sctp_build_readq_entry_chk(stcb, chk);
421 			if (control == NULL) {
422 				/* out of memory? */
423 				return;
424 			}
425 			/* save it off for our future deliveries */
426 			stcb->asoc.control_pdapi = control;
427 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
428 				end = 1;
429 			else
430 				end = 0;
431 			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
432 			sctp_add_to_readq(stcb->sctp_ep,
433 			    stcb, control, &stcb->sctp_socket->so_rcv, end,
434 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
435 			cntDel++;
436 		} else {
437 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
438 				end = 1;
439 			else
440 				end = 0;
441 			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
442 			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
443 			    stcb->asoc.control_pdapi,
444 			    chk->data, end, chk->rec.data.TSN_seq,
445 			    &stcb->sctp_socket->so_rcv)) {
446 				/*
447 				 * something is very wrong, either
448 				 * control_pdapi is NULL, or the tail_mbuf
449 				 * is corrupt, or there is a EOM already on
450 				 * the mbuf chain.
451 				 */
452 				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
453 					goto abandon;
454 				} else {
455 #ifdef INVARIANTS
456 					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
457 						panic("This should not happen control_pdapi NULL?");
458 					}
459 					/* if we did not panic, it was a EOM */
460 					panic("Bad chunking ??");
461 #else
462 					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
463 						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
464 					}
465 					SCTP_PRINTF("Bad chunking ??\n");
466 					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
467 
468 #endif
469 					goto abandon;
470 				}
471 			}
472 			cntDel++;
473 		}
474 		/* pull it we did it */
475 		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
476 		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
477 			asoc->fragmented_delivery_inprogress = 0;
478 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
479 				asoc->strmin[stream_no].last_sequence_delivered++;
480 			}
481 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
482 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
483 			}
484 		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
485 			/*
486 			 * turn the flag back on since we just  delivered
487 			 * yet another one.
488 			 */
489 			asoc->fragmented_delivery_inprogress = 1;
490 		}
491 		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
492 		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
493 		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
494 		asoc->last_strm_no_delivered = chk->rec.data.stream_number;
495 
496 		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
497 		asoc->size_on_reasm_queue -= chk->send_size;
498 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
499 		/* free up the chk */
500 		chk->data = NULL;
501 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
502 
503 		if (asoc->fragmented_delivery_inprogress == 0) {
504 			/*
505 			 * Now lets see if we can deliver the next one on
506 			 * the stream
507 			 */
508 			struct sctp_stream_in *strm;
509 
510 			strm = &asoc->strmin[stream_no];
511 			nxt_todel = strm->last_sequence_delivered + 1;
512 			TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
513 				/* Deliver more if we can. */
514 				if (nxt_todel == ctl->sinfo_ssn) {
515 					TAILQ_REMOVE(&strm->inqueue, ctl, next);
516 					asoc->size_on_all_streams -= ctl->length;
517 					sctp_ucount_decr(asoc->cnt_on_all_streams);
518 					strm->last_sequence_delivered++;
519 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
520 					sctp_add_to_readq(stcb->sctp_ep, stcb,
521 					    ctl,
522 					    &stcb->sctp_socket->so_rcv, 1,
523 					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
524 				} else {
525 					break;
526 				}
527 				nxt_todel = strm->last_sequence_delivered + 1;
528 			}
529 			break;
530 		}
531 	}
532 }
533 
534 /*
535  * Queue the chunk either right into the socket buffer if it is the next one
536  * to go OR put it in the correct place in the delivery queue.  If we do
537  * append to the so_buf, keep doing so until we are out of order. One big
538  * question still remains, what to do when the socket buffer is FULL??
539  */
540 static void
541 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
542     struct sctp_queued_to_read *control, int *abort_flag)
543 {
544 	/*
545 	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
546 	 * all the data in one stream this could happen quite rapidly. One
547 	 * could use the TSN to keep track of things, but this scheme breaks
548 	 * down in the other type of stream useage that could occur. Send a
549 	 * single msg to stream 0, send 4Billion messages to stream 1, now
550 	 * send a message to stream 0. You have a situation where the TSN
551 	 * has wrapped but not in the stream. Is this worth worrying about
552 	 * or should we just change our queue sort at the bottom to be by
553 	 * TSN.
554 	 *
555 	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
556 	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
557 	 * assignment this could happen... and I don't see how this would be
558 	 * a violation. So for now I am undecided an will leave the sort by
559 	 * SSN alone. Maybe a hybred approach is the answer
560 	 *
561 	 */
562 	struct sctp_stream_in *strm;
563 	struct sctp_queued_to_read *at;
564 	int queue_needed;
565 	uint16_t nxt_todel;
566 	struct mbuf *oper;
567 
568 	queue_needed = 1;
569 	asoc->size_on_all_streams += control->length;
570 	sctp_ucount_incr(asoc->cnt_on_all_streams);
571 	strm = &asoc->strmin[control->sinfo_stream];
572 	nxt_todel = strm->last_sequence_delivered + 1;
573 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
574 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
575 	}
576 	SCTPDBG(SCTP_DEBUG_INDATA1,
577 	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
578 	    (uint32_t) control->sinfo_stream,
579 	    (uint32_t) strm->last_sequence_delivered,
580 	    (uint32_t) nxt_todel);
581 	if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
582 		/* The incoming sseq is behind where we last delivered? */
583 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort  association\n",
584 		    control->sinfo_ssn, strm->last_sequence_delivered);
585 protocol_error:
586 		/*
587 		 * throw it in the stream so it gets cleaned up in
588 		 * association destruction
589 		 */
590 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
591 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
592 		    0, M_DONTWAIT, 1, MT_DATA);
593 		if (oper) {
594 			struct sctp_paramhdr *ph;
595 			uint32_t *ippp;
596 
597 			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
598 			    (sizeof(uint32_t) * 3);
599 			ph = mtod(oper, struct sctp_paramhdr *);
600 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
601 			ph->param_length = htons(SCTP_BUF_LEN(oper));
602 			ippp = (uint32_t *) (ph + 1);
603 			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
604 			ippp++;
605 			*ippp = control->sinfo_tsn;
606 			ippp++;
607 			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
608 		}
609 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
610 		sctp_abort_an_association(stcb->sctp_ep, stcb,
611 		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
612 
613 		*abort_flag = 1;
614 		return;
615 
616 	}
617 	if (nxt_todel == control->sinfo_ssn) {
618 		/* can be delivered right away? */
619 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
620 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
621 		}
622 		/* EY it wont be queued if it could be delivered directly */
623 		queue_needed = 0;
624 		asoc->size_on_all_streams -= control->length;
625 		sctp_ucount_decr(asoc->cnt_on_all_streams);
626 		strm->last_sequence_delivered++;
627 
628 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
629 		sctp_add_to_readq(stcb->sctp_ep, stcb,
630 		    control,
631 		    &stcb->sctp_socket->so_rcv, 1,
632 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
633 		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
634 			/* all delivered */
635 			nxt_todel = strm->last_sequence_delivered + 1;
636 			if (nxt_todel == control->sinfo_ssn) {
637 				TAILQ_REMOVE(&strm->inqueue, control, next);
638 				asoc->size_on_all_streams -= control->length;
639 				sctp_ucount_decr(asoc->cnt_on_all_streams);
640 				strm->last_sequence_delivered++;
641 				/*
642 				 * We ignore the return of deliver_data here
643 				 * since we always can hold the chunk on the
644 				 * d-queue. And we have a finite number that
645 				 * can be delivered from the strq.
646 				 */
647 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
648 					sctp_log_strm_del(control, NULL,
649 					    SCTP_STR_LOG_FROM_IMMED_DEL);
650 				}
651 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
652 				sctp_add_to_readq(stcb->sctp_ep, stcb,
653 				    control,
654 				    &stcb->sctp_socket->so_rcv, 1,
655 				    SCTP_READ_LOCK_NOT_HELD,
656 				    SCTP_SO_NOT_LOCKED);
657 				continue;
658 			}
659 			break;
660 		}
661 	}
662 	if (queue_needed) {
663 		/*
664 		 * Ok, we did not deliver this guy, find the correct place
665 		 * to put it on the queue.
666 		 */
667 		if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
668 			goto protocol_error;
669 		}
670 		if (TAILQ_EMPTY(&strm->inqueue)) {
671 			/* Empty queue */
672 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
673 				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
674 			}
675 			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
676 		} else {
677 			TAILQ_FOREACH(at, &strm->inqueue, next) {
678 				if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
679 					/*
680 					 * one in queue is bigger than the
681 					 * new one, insert before this one
682 					 */
683 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
684 						sctp_log_strm_del(control, at,
685 						    SCTP_STR_LOG_FROM_INSERT_MD);
686 					}
687 					TAILQ_INSERT_BEFORE(at, control, next);
688 					break;
689 				} else if (at->sinfo_ssn == control->sinfo_ssn) {
690 					/*
691 					 * Gak, He sent me a duplicate str
692 					 * seq number
693 					 */
694 					/*
695 					 * foo bar, I guess I will just free
696 					 * this new guy, should we abort
697 					 * too? FIX ME MAYBE? Or it COULD be
698 					 * that the SSN's have wrapped.
699 					 * Maybe I should compare to TSN
700 					 * somehow... sigh for now just blow
701 					 * away the chunk!
702 					 */
703 
704 					if (control->data)
705 						sctp_m_freem(control->data);
706 					control->data = NULL;
707 					asoc->size_on_all_streams -= control->length;
708 					sctp_ucount_decr(asoc->cnt_on_all_streams);
709 					if (control->whoFrom) {
710 						sctp_free_remote_addr(control->whoFrom);
711 						control->whoFrom = NULL;
712 					}
713 					sctp_free_a_readq(stcb, control);
714 					return;
715 				} else {
716 					if (TAILQ_NEXT(at, next) == NULL) {
717 						/*
718 						 * We are at the end, insert
719 						 * it after this one
720 						 */
721 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
722 							sctp_log_strm_del(control, at,
723 							    SCTP_STR_LOG_FROM_INSERT_TL);
724 						}
725 						TAILQ_INSERT_AFTER(&strm->inqueue,
726 						    at, control, next);
727 						break;
728 					}
729 				}
730 			}
731 		}
732 	}
733 }
734 
735 /*
736  * Returns two things: You get the total size of the deliverable parts of the
737  * first fragmented message on the reassembly queue. And you get a 1 back if
738  * all of the message is ready or a 0 back if the message is still incomplete
739  */
740 static int
741 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
742 {
743 	struct sctp_tmit_chunk *chk;
744 	uint32_t tsn;
745 
746 	*t_size = 0;
747 	chk = TAILQ_FIRST(&asoc->reasmqueue);
748 	if (chk == NULL) {
749 		/* nothing on the queue */
750 		return (0);
751 	}
752 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
753 		/* Not a first on the queue */
754 		return (0);
755 	}
756 	tsn = chk->rec.data.TSN_seq;
757 	TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
758 		if (tsn != chk->rec.data.TSN_seq) {
759 			return (0);
760 		}
761 		*t_size += chk->send_size;
762 		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
763 			return (1);
764 		}
765 		tsn++;
766 	}
767 	return (0);
768 }
769 
770 static void
771 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
772 {
773 	struct sctp_tmit_chunk *chk;
774 	uint16_t nxt_todel;
775 	uint32_t tsize, pd_point;
776 
777 doit_again:
778 	chk = TAILQ_FIRST(&asoc->reasmqueue);
779 	if (chk == NULL) {
780 		/* Huh? */
781 		asoc->size_on_reasm_queue = 0;
782 		asoc->cnt_on_reasm_queue = 0;
783 		return;
784 	}
785 	if (asoc->fragmented_delivery_inprogress == 0) {
786 		nxt_todel =
787 		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
788 		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
789 		    (nxt_todel == chk->rec.data.stream_seq ||
790 		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
791 			/*
792 			 * Yep the first one is here and its ok to deliver
793 			 * but should we?
794 			 */
795 			if (stcb->sctp_socket) {
796 				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
797 				    stcb->sctp_ep->partial_delivery_point);
798 			} else {
799 				pd_point = stcb->sctp_ep->partial_delivery_point;
800 			}
801 			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
802 
803 				/*
804 				 * Yes, we setup to start reception, by
805 				 * backing down the TSN just in case we
806 				 * can't deliver. If we
807 				 */
808 				asoc->fragmented_delivery_inprogress = 1;
809 				asoc->tsn_last_delivered =
810 				    chk->rec.data.TSN_seq - 1;
811 				asoc->str_of_pdapi =
812 				    chk->rec.data.stream_number;
813 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
814 				asoc->pdapi_ppid = chk->rec.data.payloadtype;
815 				asoc->fragment_flags = chk->rec.data.rcv_flags;
816 				sctp_service_reassembly(stcb, asoc);
817 			}
818 		}
819 	} else {
820 		/*
821 		 * Service re-assembly will deliver stream data queued at
822 		 * the end of fragmented delivery.. but it wont know to go
823 		 * back and call itself again... we do that here with the
824 		 * got doit_again
825 		 */
826 		sctp_service_reassembly(stcb, asoc);
827 		if (asoc->fragmented_delivery_inprogress == 0) {
828 			/*
829 			 * finished our Fragmented delivery, could be more
830 			 * waiting?
831 			 */
832 			goto doit_again;
833 		}
834 	}
835 }
836 
837 /*
838  * Dump onto the re-assembly queue, in its proper place. After dumping on the
839  * queue, see if anthing can be delivered. If so pull it off (or as much as
840  * we can. If we run out of space then we must dump what we can and set the
841  * appropriate flag to say we queued what we could.
842  */
843 static void
844 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
845     struct sctp_tmit_chunk *chk, int *abort_flag)
846 {
847 	struct mbuf *oper;
848 	uint32_t cum_ackp1, prev_tsn, post_tsn;
849 	struct sctp_tmit_chunk *at, *prev, *next;
850 
851 	prev = next = NULL;
852 	cum_ackp1 = asoc->tsn_last_delivered + 1;
853 	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
854 		/* This is the first one on the queue */
855 		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
856 		/*
857 		 * we do not check for delivery of anything when only one
858 		 * fragment is here
859 		 */
860 		asoc->size_on_reasm_queue = chk->send_size;
861 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
862 		if (chk->rec.data.TSN_seq == cum_ackp1) {
863 			if (asoc->fragmented_delivery_inprogress == 0 &&
864 			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
865 			    SCTP_DATA_FIRST_FRAG) {
866 				/*
867 				 * An empty queue, no delivery inprogress,
868 				 * we hit the next one and it does NOT have
869 				 * a FIRST fragment mark.
870 				 */
871 				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
872 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
873 				    0, M_DONTWAIT, 1, MT_DATA);
874 
875 				if (oper) {
876 					struct sctp_paramhdr *ph;
877 					uint32_t *ippp;
878 
879 					SCTP_BUF_LEN(oper) =
880 					    sizeof(struct sctp_paramhdr) +
881 					    (sizeof(uint32_t) * 3);
882 					ph = mtod(oper, struct sctp_paramhdr *);
883 					ph->param_type =
884 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
885 					ph->param_length = htons(SCTP_BUF_LEN(oper));
886 					ippp = (uint32_t *) (ph + 1);
887 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
888 					ippp++;
889 					*ippp = chk->rec.data.TSN_seq;
890 					ippp++;
891 					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
892 
893 				}
894 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
895 				sctp_abort_an_association(stcb->sctp_ep, stcb,
896 				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
897 				*abort_flag = 1;
898 			} else if (asoc->fragmented_delivery_inprogress &&
899 			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
900 				/*
901 				 * We are doing a partial delivery and the
902 				 * NEXT chunk MUST be either the LAST or
903 				 * MIDDLE fragment NOT a FIRST
904 				 */
905 				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
906 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
907 				    0, M_DONTWAIT, 1, MT_DATA);
908 				if (oper) {
909 					struct sctp_paramhdr *ph;
910 					uint32_t *ippp;
911 
912 					SCTP_BUF_LEN(oper) =
913 					    sizeof(struct sctp_paramhdr) +
914 					    (3 * sizeof(uint32_t));
915 					ph = mtod(oper, struct sctp_paramhdr *);
916 					ph->param_type =
917 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
918 					ph->param_length = htons(SCTP_BUF_LEN(oper));
919 					ippp = (uint32_t *) (ph + 1);
920 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
921 					ippp++;
922 					*ippp = chk->rec.data.TSN_seq;
923 					ippp++;
924 					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
925 				}
926 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
927 				sctp_abort_an_association(stcb->sctp_ep, stcb,
928 				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
929 				*abort_flag = 1;
930 			} else if (asoc->fragmented_delivery_inprogress) {
931 				/*
932 				 * Here we are ok with a MIDDLE or LAST
933 				 * piece
934 				 */
935 				if (chk->rec.data.stream_number !=
936 				    asoc->str_of_pdapi) {
937 					/* Got to be the right STR No */
938 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
939 					    chk->rec.data.stream_number,
940 					    asoc->str_of_pdapi);
941 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
942 					    0, M_DONTWAIT, 1, MT_DATA);
943 					if (oper) {
944 						struct sctp_paramhdr *ph;
945 						uint32_t *ippp;
946 
947 						SCTP_BUF_LEN(oper) =
948 						    sizeof(struct sctp_paramhdr) +
949 						    (sizeof(uint32_t) * 3);
950 						ph = mtod(oper,
951 						    struct sctp_paramhdr *);
952 						ph->param_type =
953 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
954 						ph->param_length =
955 						    htons(SCTP_BUF_LEN(oper));
956 						ippp = (uint32_t *) (ph + 1);
957 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
958 						ippp++;
959 						*ippp = chk->rec.data.TSN_seq;
960 						ippp++;
961 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
962 					}
963 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
964 					sctp_abort_an_association(stcb->sctp_ep,
965 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
966 					*abort_flag = 1;
967 				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
968 					    SCTP_DATA_UNORDERED &&
969 				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
970 					/* Got to be the right STR Seq */
971 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
972 					    chk->rec.data.stream_seq,
973 					    asoc->ssn_of_pdapi);
974 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
975 					    0, M_DONTWAIT, 1, MT_DATA);
976 					if (oper) {
977 						struct sctp_paramhdr *ph;
978 						uint32_t *ippp;
979 
980 						SCTP_BUF_LEN(oper) =
981 						    sizeof(struct sctp_paramhdr) +
982 						    (3 * sizeof(uint32_t));
983 						ph = mtod(oper,
984 						    struct sctp_paramhdr *);
985 						ph->param_type =
986 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
987 						ph->param_length =
988 						    htons(SCTP_BUF_LEN(oper));
989 						ippp = (uint32_t *) (ph + 1);
990 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
991 						ippp++;
992 						*ippp = chk->rec.data.TSN_seq;
993 						ippp++;
994 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
995 
996 					}
997 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
998 					sctp_abort_an_association(stcb->sctp_ep,
999 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1000 					*abort_flag = 1;
1001 				}
1002 			}
1003 		}
1004 		return;
1005 	}
1006 	/* Find its place */
1007 	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1008 		if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
1009 			/*
1010 			 * one in queue is bigger than the new one, insert
1011 			 * before this one
1012 			 */
1013 			/* A check */
1014 			asoc->size_on_reasm_queue += chk->send_size;
1015 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1016 			next = at;
1017 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1018 			break;
1019 		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
1020 			/* Gak, He sent me a duplicate str seq number */
1021 			/*
1022 			 * foo bar, I guess I will just free this new guy,
1023 			 * should we abort too? FIX ME MAYBE? Or it COULD be
1024 			 * that the SSN's have wrapped. Maybe I should
1025 			 * compare to TSN somehow... sigh for now just blow
1026 			 * away the chunk!
1027 			 */
1028 			if (chk->data) {
1029 				sctp_m_freem(chk->data);
1030 				chk->data = NULL;
1031 			}
1032 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1033 			return;
1034 		} else {
1035 			prev = at;
1036 			if (TAILQ_NEXT(at, sctp_next) == NULL) {
1037 				/*
1038 				 * We are at the end, insert it after this
1039 				 * one
1040 				 */
1041 				/* check it first */
1042 				asoc->size_on_reasm_queue += chk->send_size;
1043 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1044 				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
1045 				break;
1046 			}
1047 		}
1048 	}
1049 	/* Now the audits */
1050 	if (prev) {
1051 		prev_tsn = chk->rec.data.TSN_seq - 1;
1052 		if (prev_tsn == prev->rec.data.TSN_seq) {
1053 			/*
1054 			 * Ok the one I am dropping onto the end is the
1055 			 * NEXT. A bit of valdiation here.
1056 			 */
1057 			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1058 			    SCTP_DATA_FIRST_FRAG ||
1059 			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1060 			    SCTP_DATA_MIDDLE_FRAG) {
1061 				/*
1062 				 * Insert chk MUST be a MIDDLE or LAST
1063 				 * fragment
1064 				 */
1065 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1066 				    SCTP_DATA_FIRST_FRAG) {
1067 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
1068 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
1069 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1070 					    0, M_DONTWAIT, 1, MT_DATA);
1071 					if (oper) {
1072 						struct sctp_paramhdr *ph;
1073 						uint32_t *ippp;
1074 
1075 						SCTP_BUF_LEN(oper) =
1076 						    sizeof(struct sctp_paramhdr) +
1077 						    (3 * sizeof(uint32_t));
1078 						ph = mtod(oper,
1079 						    struct sctp_paramhdr *);
1080 						ph->param_type =
1081 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1082 						ph->param_length =
1083 						    htons(SCTP_BUF_LEN(oper));
1084 						ippp = (uint32_t *) (ph + 1);
1085 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1086 						ippp++;
1087 						*ippp = chk->rec.data.TSN_seq;
1088 						ippp++;
1089 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1090 
1091 					}
1092 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1093 					sctp_abort_an_association(stcb->sctp_ep,
1094 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1095 					*abort_flag = 1;
1096 					return;
1097 				}
1098 				if (chk->rec.data.stream_number !=
1099 				    prev->rec.data.stream_number) {
1100 					/*
1101 					 * Huh, need the correct STR here,
1102 					 * they must be the same.
1103 					 */
1104 					SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1105 					    chk->rec.data.stream_number,
1106 					    prev->rec.data.stream_number);
1107 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1108 					    0, M_DONTWAIT, 1, MT_DATA);
1109 					if (oper) {
1110 						struct sctp_paramhdr *ph;
1111 						uint32_t *ippp;
1112 
1113 						SCTP_BUF_LEN(oper) =
1114 						    sizeof(struct sctp_paramhdr) +
1115 						    (3 * sizeof(uint32_t));
1116 						ph = mtod(oper,
1117 						    struct sctp_paramhdr *);
1118 						ph->param_type =
1119 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1120 						ph->param_length =
1121 						    htons(SCTP_BUF_LEN(oper));
1122 						ippp = (uint32_t *) (ph + 1);
1123 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1124 						ippp++;
1125 						*ippp = chk->rec.data.TSN_seq;
1126 						ippp++;
1127 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1128 					}
1129 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1130 					sctp_abort_an_association(stcb->sctp_ep,
1131 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1132 
1133 					*abort_flag = 1;
1134 					return;
1135 				}
1136 				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1137 				    chk->rec.data.stream_seq !=
1138 				    prev->rec.data.stream_seq) {
1139 					/*
1140 					 * Huh, need the correct STR here,
1141 					 * they must be the same.
1142 					 */
1143 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1144 					    chk->rec.data.stream_seq,
1145 					    prev->rec.data.stream_seq);
1146 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1147 					    0, M_DONTWAIT, 1, MT_DATA);
1148 					if (oper) {
1149 						struct sctp_paramhdr *ph;
1150 						uint32_t *ippp;
1151 
1152 						SCTP_BUF_LEN(oper) =
1153 						    sizeof(struct sctp_paramhdr) +
1154 						    (3 * sizeof(uint32_t));
1155 						ph = mtod(oper,
1156 						    struct sctp_paramhdr *);
1157 						ph->param_type =
1158 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1159 						ph->param_length =
1160 						    htons(SCTP_BUF_LEN(oper));
1161 						ippp = (uint32_t *) (ph + 1);
1162 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1163 						ippp++;
1164 						*ippp = chk->rec.data.TSN_seq;
1165 						ippp++;
1166 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1167 					}
1168 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1169 					sctp_abort_an_association(stcb->sctp_ep,
1170 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1171 
1172 					*abort_flag = 1;
1173 					return;
1174 				}
1175 			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1176 			    SCTP_DATA_LAST_FRAG) {
1177 				/* Insert chk MUST be a FIRST */
1178 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1179 				    SCTP_DATA_FIRST_FRAG) {
1180 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1181 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1182 					    0, M_DONTWAIT, 1, MT_DATA);
1183 					if (oper) {
1184 						struct sctp_paramhdr *ph;
1185 						uint32_t *ippp;
1186 
1187 						SCTP_BUF_LEN(oper) =
1188 						    sizeof(struct sctp_paramhdr) +
1189 						    (3 * sizeof(uint32_t));
1190 						ph = mtod(oper,
1191 						    struct sctp_paramhdr *);
1192 						ph->param_type =
1193 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1194 						ph->param_length =
1195 						    htons(SCTP_BUF_LEN(oper));
1196 						ippp = (uint32_t *) (ph + 1);
1197 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1198 						ippp++;
1199 						*ippp = chk->rec.data.TSN_seq;
1200 						ippp++;
1201 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1202 
1203 					}
1204 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1205 					sctp_abort_an_association(stcb->sctp_ep,
1206 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1207 
1208 					*abort_flag = 1;
1209 					return;
1210 				}
1211 			}
1212 		}
1213 	}
1214 	if (next) {
1215 		post_tsn = chk->rec.data.TSN_seq + 1;
1216 		if (post_tsn == next->rec.data.TSN_seq) {
1217 			/*
1218 			 * Ok the one I am inserting ahead of is my NEXT
1219 			 * one. A bit of valdiation here.
1220 			 */
1221 			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1222 				/* Insert chk MUST be a last fragment */
1223 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1224 				    != SCTP_DATA_LAST_FRAG) {
1225 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1226 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1227 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1228 					    0, M_DONTWAIT, 1, MT_DATA);
1229 					if (oper) {
1230 						struct sctp_paramhdr *ph;
1231 						uint32_t *ippp;
1232 
1233 						SCTP_BUF_LEN(oper) =
1234 						    sizeof(struct sctp_paramhdr) +
1235 						    (3 * sizeof(uint32_t));
1236 						ph = mtod(oper,
1237 						    struct sctp_paramhdr *);
1238 						ph->param_type =
1239 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1240 						ph->param_length =
1241 						    htons(SCTP_BUF_LEN(oper));
1242 						ippp = (uint32_t *) (ph + 1);
1243 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1244 						ippp++;
1245 						*ippp = chk->rec.data.TSN_seq;
1246 						ippp++;
1247 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1248 					}
1249 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1250 					sctp_abort_an_association(stcb->sctp_ep,
1251 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1252 
1253 					*abort_flag = 1;
1254 					return;
1255 				}
1256 			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1257 				    SCTP_DATA_MIDDLE_FRAG ||
1258 				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1259 			    SCTP_DATA_LAST_FRAG) {
1260 				/*
1261 				 * Insert chk CAN be MIDDLE or FIRST NOT
1262 				 * LAST
1263 				 */
1264 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1265 				    SCTP_DATA_LAST_FRAG) {
1266 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1267 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1268 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1269 					    0, M_DONTWAIT, 1, MT_DATA);
1270 					if (oper) {
1271 						struct sctp_paramhdr *ph;
1272 						uint32_t *ippp;
1273 
1274 						SCTP_BUF_LEN(oper) =
1275 						    sizeof(struct sctp_paramhdr) +
1276 						    (3 * sizeof(uint32_t));
1277 						ph = mtod(oper,
1278 						    struct sctp_paramhdr *);
1279 						ph->param_type =
1280 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1281 						ph->param_length =
1282 						    htons(SCTP_BUF_LEN(oper));
1283 						ippp = (uint32_t *) (ph + 1);
1284 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1285 						ippp++;
1286 						*ippp = chk->rec.data.TSN_seq;
1287 						ippp++;
1288 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1289 
1290 					}
1291 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1292 					sctp_abort_an_association(stcb->sctp_ep,
1293 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1294 
1295 					*abort_flag = 1;
1296 					return;
1297 				}
1298 				if (chk->rec.data.stream_number !=
1299 				    next->rec.data.stream_number) {
1300 					/*
1301 					 * Huh, need the correct STR here,
1302 					 * they must be the same.
1303 					 */
1304 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1305 					    chk->rec.data.stream_number,
1306 					    next->rec.data.stream_number);
1307 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1308 					    0, M_DONTWAIT, 1, MT_DATA);
1309 					if (oper) {
1310 						struct sctp_paramhdr *ph;
1311 						uint32_t *ippp;
1312 
1313 						SCTP_BUF_LEN(oper) =
1314 						    sizeof(struct sctp_paramhdr) +
1315 						    (3 * sizeof(uint32_t));
1316 						ph = mtod(oper,
1317 						    struct sctp_paramhdr *);
1318 						ph->param_type =
1319 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1320 						ph->param_length =
1321 						    htons(SCTP_BUF_LEN(oper));
1322 						ippp = (uint32_t *) (ph + 1);
1323 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1324 						ippp++;
1325 						*ippp = chk->rec.data.TSN_seq;
1326 						ippp++;
1327 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1328 
1329 					}
1330 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1331 					sctp_abort_an_association(stcb->sctp_ep,
1332 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1333 
1334 					*abort_flag = 1;
1335 					return;
1336 				}
1337 				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1338 				    chk->rec.data.stream_seq !=
1339 				    next->rec.data.stream_seq) {
1340 					/*
1341 					 * Huh, need the correct STR here,
1342 					 * they must be the same.
1343 					 */
1344 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1345 					    chk->rec.data.stream_seq,
1346 					    next->rec.data.stream_seq);
1347 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1348 					    0, M_DONTWAIT, 1, MT_DATA);
1349 					if (oper) {
1350 						struct sctp_paramhdr *ph;
1351 						uint32_t *ippp;
1352 
1353 						SCTP_BUF_LEN(oper) =
1354 						    sizeof(struct sctp_paramhdr) +
1355 						    (3 * sizeof(uint32_t));
1356 						ph = mtod(oper,
1357 						    struct sctp_paramhdr *);
1358 						ph->param_type =
1359 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1360 						ph->param_length =
1361 						    htons(SCTP_BUF_LEN(oper));
1362 						ippp = (uint32_t *) (ph + 1);
1363 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1364 						ippp++;
1365 						*ippp = chk->rec.data.TSN_seq;
1366 						ippp++;
1367 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1368 					}
1369 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1370 					sctp_abort_an_association(stcb->sctp_ep,
1371 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1372 
1373 					*abort_flag = 1;
1374 					return;
1375 				}
1376 			}
1377 		}
1378 	}
1379 	/* Do we need to do some delivery? check */
1380 	sctp_deliver_reasm_check(stcb, asoc);
1381 }
1382 
1383 /*
1384  * This is an unfortunate routine. It checks to make sure a evil guy is not
1385  * stuffing us full of bad packet fragments. A broken peer could also do this
1386  * but this is doubtful. It is to bad I must worry about evil crackers sigh
1387  * :< more cycles.
1388  */
1389 static int
1390 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1391     uint32_t TSN_seq)
1392 {
1393 	struct sctp_tmit_chunk *at;
1394 	uint32_t tsn_est;
1395 
1396 	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1397 		if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
1398 			/* is it one bigger? */
1399 			tsn_est = at->rec.data.TSN_seq + 1;
1400 			if (tsn_est == TSN_seq) {
1401 				/* yep. It better be a last then */
1402 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1403 				    SCTP_DATA_LAST_FRAG) {
1404 					/*
1405 					 * Ok this guy belongs next to a guy
1406 					 * that is NOT last, it should be a
1407 					 * middle/last, not a complete
1408 					 * chunk.
1409 					 */
1410 					return (1);
1411 				} else {
1412 					/*
1413 					 * This guy is ok since its a LAST
1414 					 * and the new chunk is a fully
1415 					 * self- contained one.
1416 					 */
1417 					return (0);
1418 				}
1419 			}
1420 		} else if (TSN_seq == at->rec.data.TSN_seq) {
1421 			/* Software error since I have a dup? */
1422 			return (1);
1423 		} else {
1424 			/*
1425 			 * Ok, 'at' is larger than new chunk but does it
1426 			 * need to be right before it.
1427 			 */
1428 			tsn_est = TSN_seq + 1;
1429 			if (tsn_est == at->rec.data.TSN_seq) {
1430 				/* Yep, It better be a first */
1431 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1432 				    SCTP_DATA_FIRST_FRAG) {
1433 					return (1);
1434 				} else {
1435 					return (0);
1436 				}
1437 			}
1438 		}
1439 	}
1440 	return (0);
1441 }
1442 
1443 
1444 static int
1445 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1446     struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1447     struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1448     int *break_flag, int last_chunk)
1449 {
1450 	/* Process a data chunk */
1451 	/* struct sctp_tmit_chunk *chk; */
1452 	struct sctp_tmit_chunk *chk;
1453 	uint32_t tsn, gap;
1454 	struct mbuf *dmbuf;
1455 	int the_len;
1456 	int need_reasm_check = 0;
1457 	uint16_t strmno, strmseq;
1458 	struct mbuf *oper;
1459 	struct sctp_queued_to_read *control;
1460 	int ordered;
1461 	uint32_t protocol_id;
1462 	uint8_t chunk_flags;
1463 	struct sctp_stream_reset_list *liste;
1464 
1465 	chk = NULL;
1466 	tsn = ntohl(ch->dp.tsn);
1467 	chunk_flags = ch->ch.chunk_flags;
1468 	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1469 		asoc->send_sack = 1;
1470 	}
1471 	protocol_id = ch->dp.protocol_id;
1472 	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1473 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1474 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1475 	}
1476 	if (stcb == NULL) {
1477 		return (0);
1478 	}
1479 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1480 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1481 		/* It is a duplicate */
1482 		SCTP_STAT_INCR(sctps_recvdupdata);
1483 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1484 			/* Record a dup for the next outbound sack */
1485 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1486 			asoc->numduptsns++;
1487 		}
1488 		asoc->send_sack = 1;
1489 		return (0);
1490 	}
1491 	/* Calculate the number of TSN's between the base and this TSN */
1492 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1493 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1494 		/* Can't hold the bit in the mapping at max array, toss it */
1495 		return (0);
1496 	}
1497 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1498 		SCTP_TCB_LOCK_ASSERT(stcb);
1499 		if (sctp_expand_mapping_array(asoc, gap)) {
1500 			/* Can't expand, drop it */
1501 			return (0);
1502 		}
1503 	}
1504 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1505 		*high_tsn = tsn;
1506 	}
1507 	/* See if we have received this one already */
1508 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1509 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1510 		SCTP_STAT_INCR(sctps_recvdupdata);
1511 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1512 			/* Record a dup for the next outbound sack */
1513 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1514 			asoc->numduptsns++;
1515 		}
1516 		asoc->send_sack = 1;
1517 		return (0);
1518 	}
1519 	/*
1520 	 * Check to see about the GONE flag, duplicates would cause a sack
1521 	 * to be sent up above
1522 	 */
1523 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1524 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1525 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1526 	    ) {
1527 		/*
1528 		 * wait a minute, this guy is gone, there is no longer a
1529 		 * receiver. Send peer an ABORT!
1530 		 */
1531 		struct mbuf *op_err;
1532 
1533 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1534 		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
1535 		*abort_flag = 1;
1536 		return (0);
1537 	}
1538 	/*
1539 	 * Now before going further we see if there is room. If NOT then we
1540 	 * MAY let one through only IF this TSN is the one we are waiting
1541 	 * for on a partial delivery API.
1542 	 */
1543 
1544 	/* now do the tests */
1545 	if (((asoc->cnt_on_all_streams +
1546 	    asoc->cnt_on_reasm_queue +
1547 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1548 	    (((int)asoc->my_rwnd) <= 0)) {
1549 		/*
1550 		 * When we have NO room in the rwnd we check to make sure
1551 		 * the reader is doing its job...
1552 		 */
1553 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1554 			/* some to read, wake-up */
1555 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1556 			struct socket *so;
1557 
1558 			so = SCTP_INP_SO(stcb->sctp_ep);
1559 			atomic_add_int(&stcb->asoc.refcnt, 1);
1560 			SCTP_TCB_UNLOCK(stcb);
1561 			SCTP_SOCKET_LOCK(so, 1);
1562 			SCTP_TCB_LOCK(stcb);
1563 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1564 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1565 				/* assoc was freed while we were unlocked */
1566 				SCTP_SOCKET_UNLOCK(so, 1);
1567 				return (0);
1568 			}
1569 #endif
1570 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1571 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1572 			SCTP_SOCKET_UNLOCK(so, 1);
1573 #endif
1574 		}
1575 		/* now is it in the mapping array of what we have accepted? */
1576 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1577 		    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1578 			/* Nope not in the valid range dump it */
1579 			/* Nope, not in the valid range, dump it */
1580 			if ((asoc->cnt_on_all_streams +
1581 			    asoc->cnt_on_reasm_queue +
1582 			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1583 				SCTP_STAT_INCR(sctps_datadropchklmt);
1584 			} else {
1585 				SCTP_STAT_INCR(sctps_datadroprwnd);
1586 			}
1587 			*break_flag = 1;
1588 			return (0);
1589 		}
1590 	}
1591 	strmno = ntohs(ch->dp.stream_id);
1592 	if (strmno >= asoc->streamincnt) {
1593 		struct sctp_paramhdr *phdr;
1594 		struct mbuf *mb;
1595 
1596 		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1597 		    0, M_DONTWAIT, 1, MT_DATA);
1598 		if (mb != NULL) {
1599 			/* add some space up front so prepend will work well */
1600 			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1601 			phdr = mtod(mb, struct sctp_paramhdr *);
1602 			/*
1603 			 * Error causes are just parameters and this one has
1604 			 * two back-to-back paramhdrs, one with the error type
1605 			 * and size, the other with the stream id and a reserved field
1606 			 */
1607 			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1608 			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1609 			phdr->param_length =
1610 			    htons(sizeof(struct sctp_paramhdr) * 2);
1611 			phdr++;
1612 			/* We insert the stream in the type field */
1613 			phdr->param_type = ch->dp.stream_id;
1614 			/* And set the length to 0 for the rsvd field */
1615 			phdr->param_length = 0;
1616 			sctp_queue_op_err(stcb, mb);
1617 		}
1618 		SCTP_STAT_INCR(sctps_badsid);
1619 		SCTP_TCB_LOCK_ASSERT(stcb);
1620 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1621 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1622 			asoc->highest_tsn_inside_nr_map = tsn;
1623 		}
1624 		if (tsn == (asoc->cumulative_tsn + 1)) {
1625 			/* Update cum-ack */
1626 			asoc->cumulative_tsn = tsn;
1627 		}
1628 		return (0);
1629 	}
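	/*
	 * For reference, a sketch of the RFC 4960 "Invalid Stream
	 * Identifier" error cause that the two back-to-back paramhdrs
	 * above produce (illustrative only, excluded from the build):
	 */
#if 0
	struct invalid_stream_cause_sketch {
		uint16_t cause_code;	/* htons(SCTP_CAUSE_INVALID_STREAM) */
		uint16_t cause_length;	/* htons(8) */
		uint16_t stream_id;	/* offending stream, network order */
		uint16_t reserved;	/* left as 0 above */
	};
#endif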
1630 	/*
1631 	 * Before we continue let's validate that we are not being fooled by
1632 	 * an evil attacker. We can only have 4k chunks based on our TSN
1633 	 * spread allowed by the mapping array (512 * 8 bits), so there is no
1634 	 * way our stream sequence numbers could have wrapped. Of course we
1635 	 * only validate the FIRST fragment, so the bit must be set.
1636 	 */
1637 	strmseq = ntohs(ch->dp.stream_sequence);
1638 #ifdef SCTP_ASOCLOG_OF_TSNS
1639 	SCTP_TCB_LOCK_ASSERT(stcb);
1640 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1641 		asoc->tsn_in_at = 0;
1642 		asoc->tsn_in_wrapped = 1;
1643 	}
1644 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1645 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1646 	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1647 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1648 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1649 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1650 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1651 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1652 	asoc->tsn_in_at++;
1653 #endif
1654 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1655 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1656 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1657 	    SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1658 		/* The incoming sseq is behind where we last delivered? */
1659 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1660 		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1661 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1662 		    0, M_DONTWAIT, 1, MT_DATA);
1663 		if (oper) {
1664 			struct sctp_paramhdr *ph;
1665 			uint32_t *ippp;
1666 
1667 			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1668 			    (3 * sizeof(uint32_t));
1669 			ph = mtod(oper, struct sctp_paramhdr *);
1670 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1671 			ph->param_length = htons(SCTP_BUF_LEN(oper));
1672 			ippp = (uint32_t *) (ph + 1);
1673 			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1674 			ippp++;
1675 			*ippp = tsn;
1676 			ippp++;
1677 			*ippp = ((strmno << 16) | strmseq);
1678 
1679 		}
1680 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1681 		sctp_abort_an_association(stcb->sctp_ep, stcb,
1682 		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1683 		*abort_flag = 1;
1684 		return (0);
1685 	}
1686 	/************************************
1687 	 * From here down we may find ch-> invalid,
1688 	 * so it's a good idea NOT to use it.
1689 	 *************************************/
1690 
1691 	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1692 	if (last_chunk == 0) {
1693 		dmbuf = SCTP_M_COPYM(*m,
1694 		    (offset + sizeof(struct sctp_data_chunk)),
1695 		    the_len, M_DONTWAIT);
1696 #ifdef SCTP_MBUF_LOGGING
1697 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1698 			struct mbuf *mat;
1699 
1700 			for (mat = dmbuf; mat; mat = SCTP_BUF_NEXT(mat)) {
1701 				if (SCTP_BUF_IS_EXTENDED(mat)) {
1702 					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1703 				}
1704 			}
1705 		}
1706 #endif
1707 	} else {
1708 		/* We can steal the last chunk */
1709 		int l_len;
1710 
1711 		dmbuf = *m;
1712 		/* lop off the top part */
1713 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1714 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1715 			l_len = SCTP_BUF_LEN(dmbuf);
1716 		} else {
1717 			/*
1718 			 * need to count up the size; hopefully we do not hit
1719 			 * this too often :-0
1720 			 */
1721 			struct mbuf *lat;
1722 
1723 			l_len = 0;
1724 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1725 				l_len += SCTP_BUF_LEN(lat);
1726 			}
1727 		}
1728 		if (l_len > the_len) {
1729 			/* Trim the rounding bytes off the end too */
1730 			m_adj(dmbuf, -(l_len - the_len));
1731 		}
1732 	}
1733 	if (dmbuf == NULL) {
1734 		SCTP_STAT_INCR(sctps_nomem);
1735 		return (0);
1736 	}
1737 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1738 	    asoc->fragmented_delivery_inprogress == 0 &&
1739 	    TAILQ_EMPTY(&asoc->resetHead) &&
1740 	    ((ordered == 0) ||
1741 	    ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1742 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1743 		/* Candidate for express delivery */
1744 		/*
1745 		 * It's not fragmented, no PD-API is up, nothing in the
1746 		 * delivery queue, it's un-ordered OR ordered and the next to
1747 		 * deliver AND nothing else is stuck on the stream queue,
1748 		 * and there is room for it in the socket buffer. Let's just
1749 		 * stuff it up the buffer....
1750 		 */
1751 
1752 		/* It would be nice to avoid this copy if we could :< */
1753 		sctp_alloc_a_readq(stcb, control);
1754 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1755 		    protocol_id,
1756 		    stcb->asoc.context,
1757 		    strmno, strmseq,
1758 		    chunk_flags,
1759 		    dmbuf);
1760 		if (control == NULL) {
1761 			goto failed_express_del;
1762 		}
1763 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1764 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1765 			asoc->highest_tsn_inside_nr_map = tsn;
1766 		}
1767 		sctp_add_to_readq(stcb->sctp_ep, stcb,
1768 		    control, &stcb->sctp_socket->so_rcv,
1769 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1770 
1771 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1772 			/* for ordered, bump what we delivered */
1773 			asoc->strmin[strmno].last_sequence_delivered++;
1774 		}
1775 		SCTP_STAT_INCR(sctps_recvexpress);
1776 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1777 			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1778 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1779 		}
1780 		control = NULL;
1781 
1782 		goto finish_express_del;
1783 	}
1784 failed_express_del:
1785 	/* If we reach here this is a new chunk */
1786 	chk = NULL;
1787 	control = NULL;
1788 	/* Express for fragmented delivery? */
1789 	if ((asoc->fragmented_delivery_inprogress) &&
1790 	    (stcb->asoc.control_pdapi) &&
1791 	    (asoc->str_of_pdapi == strmno) &&
1792 	    (asoc->ssn_of_pdapi == strmseq)
1793 	    ) {
1794 		control = stcb->asoc.control_pdapi;
1795 		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1796 			/* Can't be another first? */
1797 			goto failed_pdapi_express_del;
1798 		}
1799 		if (tsn == (control->sinfo_tsn + 1)) {
1800 			/* Yep, we can add it on */
1801 			int end = 0;
1802 
1803 			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1804 				end = 1;
1805 			}
1806 			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1807 			    tsn,
1808 			    &stcb->sctp_socket->so_rcv)) {
1809 				SCTP_PRINTF("Append fails end:%d\n", end);
1810 				goto failed_pdapi_express_del;
1811 			}
1812 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1813 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1814 				asoc->highest_tsn_inside_nr_map = tsn;
1815 			}
1816 			SCTP_STAT_INCR(sctps_recvexpressm);
1817 			control->sinfo_tsn = tsn;
1818 			asoc->tsn_last_delivered = tsn;
1819 			asoc->fragment_flags = chunk_flags;
1820 			asoc->tsn_of_pdapi_last_delivered = tsn;
1821 			asoc->last_flags_delivered = chunk_flags;
1822 			asoc->last_strm_seq_delivered = strmseq;
1823 			asoc->last_strm_no_delivered = strmno;
1824 			if (end) {
1825 				/* clean up the flags and such */
1826 				asoc->fragmented_delivery_inprogress = 0;
1827 				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1828 					asoc->strmin[strmno].last_sequence_delivered++;
1829 				}
1830 				stcb->asoc.control_pdapi = NULL;
1831 				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1832 					/*
1833 					 * There could be another message
1834 					 * ready
1835 					 */
1836 					need_reasm_check = 1;
1837 				}
1838 			}
1839 			control = NULL;
1840 			goto finish_express_del;
1841 		}
1842 	}
1843 failed_pdapi_express_del:
1844 	control = NULL;
1845 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1846 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1847 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1848 			asoc->highest_tsn_inside_nr_map = tsn;
1849 		}
1850 	} else {
1851 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1852 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1853 			asoc->highest_tsn_inside_map = tsn;
1854 		}
1855 	}
1856 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1857 		sctp_alloc_a_chunk(stcb, chk);
1858 		if (chk == NULL) {
1859 			/* No memory so we drop the chunk */
1860 			SCTP_STAT_INCR(sctps_nomem);
1861 			if (last_chunk == 0) {
1862 				/* we copied it, free the copy */
1863 				sctp_m_freem(dmbuf);
1864 			}
1865 			return (0);
1866 		}
1867 		chk->rec.data.TSN_seq = tsn;
1868 		chk->no_fr_allowed = 0;
1869 		chk->rec.data.stream_seq = strmseq;
1870 		chk->rec.data.stream_number = strmno;
1871 		chk->rec.data.payloadtype = protocol_id;
1872 		chk->rec.data.context = stcb->asoc.context;
1873 		chk->rec.data.doing_fast_retransmit = 0;
1874 		chk->rec.data.rcv_flags = chunk_flags;
1875 		chk->asoc = asoc;
1876 		chk->send_size = the_len;
1877 		chk->whoTo = net;
1878 		atomic_add_int(&net->ref_count, 1);
1879 		chk->data = dmbuf;
1880 	} else {
1881 		sctp_alloc_a_readq(stcb, control);
1882 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1883 		    protocol_id,
1884 		    stcb->asoc.context,
1885 		    strmno, strmseq,
1886 		    chunk_flags,
1887 		    dmbuf);
1888 		if (control == NULL) {
1889 			/* No memory so we drop the chunk */
1890 			SCTP_STAT_INCR(sctps_nomem);
1891 			if (last_chunk == 0) {
1892 				/* we copied it, free the copy */
1893 				sctp_m_freem(dmbuf);
1894 			}
1895 			return (0);
1896 		}
1897 		control->length = the_len;
1898 	}
1899 
1900 	/* Mark it as received */
1901 	/* Now queue it where it belongs */
1902 	if (control != NULL) {
1903 		/* First a sanity check */
1904 		if (asoc->fragmented_delivery_inprogress) {
1905 			/*
1906 			 * Ok, we have a fragmented delivery in progress. If
1907 			 * this chunk is next to deliver OR belongs in our
1908 			 * view to the reassembly queue, the peer is evil or
1909 			 * broken.
1910 			 */
1911 			uint32_t estimate_tsn;
1912 
1913 			estimate_tsn = asoc->tsn_last_delivered + 1;
1914 			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1915 			    (estimate_tsn == control->sinfo_tsn)) {
1916 				/* Evil/Broke peer */
1917 				sctp_m_freem(control->data);
1918 				control->data = NULL;
1919 				if (control->whoFrom) {
1920 					sctp_free_remote_addr(control->whoFrom);
1921 					control->whoFrom = NULL;
1922 				}
1923 				sctp_free_a_readq(stcb, control);
1924 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1925 				    0, M_DONTWAIT, 1, MT_DATA);
1926 				if (oper) {
1927 					struct sctp_paramhdr *ph;
1928 					uint32_t *ippp;
1929 
1930 					SCTP_BUF_LEN(oper) =
1931 					    sizeof(struct sctp_paramhdr) +
1932 					    (3 * sizeof(uint32_t));
1933 					ph = mtod(oper, struct sctp_paramhdr *);
1934 					ph->param_type =
1935 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1936 					ph->param_length = htons(SCTP_BUF_LEN(oper));
1937 					ippp = (uint32_t *) (ph + 1);
1938 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1939 					ippp++;
1940 					*ippp = tsn;
1941 					ippp++;
1942 					*ippp = ((strmno << 16) | strmseq);
1943 				}
1944 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1945 				sctp_abort_an_association(stcb->sctp_ep, stcb,
1946 				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1947 
1948 				*abort_flag = 1;
1949 				return (0);
1950 			} else {
1951 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1952 					sctp_m_freem(control->data);
1953 					control->data = NULL;
1954 					if (control->whoFrom) {
1955 						sctp_free_remote_addr(control->whoFrom);
1956 						control->whoFrom = NULL;
1957 					}
1958 					sctp_free_a_readq(stcb, control);
1959 
1960 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1961 					    0, M_DONTWAIT, 1, MT_DATA);
1962 					if (oper) {
1963 						struct sctp_paramhdr *ph;
1964 						uint32_t *ippp;
1965 
1966 						SCTP_BUF_LEN(oper) =
1967 						    sizeof(struct sctp_paramhdr) +
1968 						    (3 * sizeof(uint32_t));
1969 						ph = mtod(oper,
1970 						    struct sctp_paramhdr *);
1971 						ph->param_type =
1972 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1973 						ph->param_length =
1974 						    htons(SCTP_BUF_LEN(oper));
1975 						ippp = (uint32_t *) (ph + 1);
1976 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1977 						ippp++;
1978 						*ippp = tsn;
1979 						ippp++;
1980 						*ippp = ((strmno << 16) | strmseq);
1981 					}
1982 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1983 					sctp_abort_an_association(stcb->sctp_ep,
1984 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1985 
1986 					*abort_flag = 1;
1987 					return (0);
1988 				}
1989 			}
1990 		} else {
1991 			/* No PDAPI running */
1992 			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1993 				/*
1994 				 * Reassembly queue is NOT empty. Validate
1995 				 * that this tsn does not need to be in the
1996 				 * reassembly queue. If it does then our peer
1997 				 * is broken or evil.
1998 				 */
1999 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2000 					sctp_m_freem(control->data);
2001 					control->data = NULL;
2002 					if (control->whoFrom) {
2003 						sctp_free_remote_addr(control->whoFrom);
2004 						control->whoFrom = NULL;
2005 					}
2006 					sctp_free_a_readq(stcb, control);
2007 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2008 					    0, M_DONTWAIT, 1, MT_DATA);
2009 					if (oper) {
2010 						struct sctp_paramhdr *ph;
2011 						uint32_t *ippp;
2012 
2013 						SCTP_BUF_LEN(oper) =
2014 						    sizeof(struct sctp_paramhdr) +
2015 						    (3 * sizeof(uint32_t));
2016 						ph = mtod(oper,
2017 						    struct sctp_paramhdr *);
2018 						ph->param_type =
2019 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2020 						ph->param_length =
2021 						    htons(SCTP_BUF_LEN(oper));
2022 						ippp = (uint32_t *) (ph + 1);
2023 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2024 						ippp++;
2025 						*ippp = tsn;
2026 						ippp++;
2027 						*ippp = ((strmno << 16) | strmseq);
2028 					}
2029 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2030 					sctp_abort_an_association(stcb->sctp_ep,
2031 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2032 
2033 					*abort_flag = 1;
2034 					return (0);
2035 				}
2036 			}
2037 		}
2038 		/* ok, if we reach here we have passed the sanity checks */
2039 		if (chunk_flags & SCTP_DATA_UNORDERED) {
2040 			/* queue directly into socket buffer */
2041 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2042 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2043 			    control,
2044 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2045 		} else {
2046 			/*
2047 			 * Special check for when streams are resetting. We
2048 			 * could be smarter about this and check the
2049 			 * actual stream to see if it is not being reset...
2050 			 * that way we would not create a HOLB when amongst
2051 			 * streams being reset and those not being reset.
2052 			 *
2053 			 * We take complete messages that have a stream reset
2054 			 * intervening (aka the TSN is after where our
2055 			 * cum-ack needs to be) off and put them on a
2056 			 * pending_reply_queue. The reassembly ones we do
2057 			 * not have to worry about since they are all sorted
2058 			 * and processed by TSN order. It is only the
2059 			 * singletons I must worry about.
2060 			 */
2061 			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2062 			    SCTP_TSN_GT(tsn, liste->tsn)) {
2063 				/*
2064 				 * yep it's past where we need to reset... go
2065 				 * ahead and queue it.
2066 				 */
2067 				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2068 					/* first one on */
2069 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2070 				} else {
2071 					struct sctp_queued_to_read *ctlOn,
2072 					                   *nctlOn;
2073 					unsigned char inserted = 0;
2074 
2075 					TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2076 						if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2077 							continue;
2078 						} else {
2079 							/* found it */
2080 							TAILQ_INSERT_BEFORE(ctlOn, control, next);
2081 							inserted = 1;
2082 							break;
2083 						}
2084 					}
2085 					if (inserted == 0) {
2086 						/*
2087 						 * not inserted before any
2088 						 * existing entry, so it
2089 						 * goes at the end.
2090 						 */
2091 						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2092 					}
2093 				}
2094 			} else {
2095 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2096 				if (*abort_flag) {
2097 					return (0);
2098 				}
2099 			}
2100 		}
2101 	} else {
2102 		/* Into the re-assembly queue */
2103 		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2104 		if (*abort_flag) {
2105 			/*
2106 			 * the assoc is now gone and chk was put onto the
2107 			 * reasm queue, which has all been freed.
2108 			 */
2109 			*m = NULL;
2110 			return (0);
2111 		}
2112 	}
2113 finish_express_del:
2114 	if (tsn == (asoc->cumulative_tsn + 1)) {
2115 		/* Update cum-ack */
2116 		asoc->cumulative_tsn = tsn;
2117 	}
2118 	if (last_chunk) {
2119 		*m = NULL;
2120 	}
2121 	if (ordered) {
2122 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2123 	} else {
2124 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2125 	}
2126 	SCTP_STAT_INCR(sctps_recvdata);
2127 	/* Set it present please */
2128 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2129 		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2130 	}
2131 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2132 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2133 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2134 	}
2135 	/* check the special flag for stream resets */
2136 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2137 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2138 		/*
2139 		 * we have finished working through the backlogged TSNs, now
2140 		 * it is time to reset streams. 1: call reset function. 2: free
2141 		 * pending_reply space. 3: distribute any chunks in the
2142 		 * pending_reply_queue.
2143 		 */
2144 		struct sctp_queued_to_read *ctl, *nctl;
2145 
2146 		sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2147 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2148 		SCTP_FREE(liste, SCTP_M_STRESET);
2149 		/* sa_ignore FREED_MEMORY */
2150 		liste = TAILQ_FIRST(&asoc->resetHead);
2151 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2152 			/* All can be removed */
2153 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2154 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2155 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2156 				if (*abort_flag) {
2157 					return (0);
2158 				}
2159 			}
2160 		} else {
2161 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2162 				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2163 					break;
2164 				}
2165 				/*
2166 				 * if ctl->sinfo_tsn is <= liste->tsn we can
2167 				 * process it, which is the NOT of
2168 				 * ctl->sinfo_tsn > liste->tsn
2169 				 */
2170 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2171 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2172 				if (*abort_flag) {
2173 					return (0);
2174 				}
2175 			}
2176 		}
2177 		/*
2178 		 * Now service reassembly to pick up anything that has been
2179 		 * held on the reassembly queue.
2180 		 */
2181 		sctp_deliver_reasm_check(stcb, asoc);
2182 		need_reasm_check = 0;
2183 	}
2184 	if (need_reasm_check) {
2185 		/* Another one waits ? */
2186 		sctp_deliver_reasm_check(stcb, asoc);
2187 	}
2188 	return (1);
2189 }
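
/*
 * A user-space sketch of the bitmap bookkeeping used above, assuming
 * SCTP_SET_TSN_PRESENT()/SCTP_IS_TSN_PRESENT() are plain bit operations
 * on the gap offset from the base TSN (illustrative only, excluded from
 * the build):
 */
#if 0
#include <stdint.h>

static void
map_set(uint8_t *map, uint32_t gap)
{
	map[gap >> 3] |= (uint8_t)(1 << (gap & 0x07));
}

static int
map_is_set(const uint8_t *map, uint32_t gap)
{
	return ((map[gap >> 3] >> (gap & 0x07)) & 0x01);
}
#endif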
2190 
2191 int8_t sctp_map_lookup_tab[256] = {
2192 	0, 1, 0, 2, 0, 1, 0, 3,
2193 	0, 1, 0, 2, 0, 1, 0, 4,
2194 	0, 1, 0, 2, 0, 1, 0, 3,
2195 	0, 1, 0, 2, 0, 1, 0, 5,
2196 	0, 1, 0, 2, 0, 1, 0, 3,
2197 	0, 1, 0, 2, 0, 1, 0, 4,
2198 	0, 1, 0, 2, 0, 1, 0, 3,
2199 	0, 1, 0, 2, 0, 1, 0, 6,
2200 	0, 1, 0, 2, 0, 1, 0, 3,
2201 	0, 1, 0, 2, 0, 1, 0, 4,
2202 	0, 1, 0, 2, 0, 1, 0, 3,
2203 	0, 1, 0, 2, 0, 1, 0, 5,
2204 	0, 1, 0, 2, 0, 1, 0, 3,
2205 	0, 1, 0, 2, 0, 1, 0, 4,
2206 	0, 1, 0, 2, 0, 1, 0, 3,
2207 	0, 1, 0, 2, 0, 1, 0, 7,
2208 	0, 1, 0, 2, 0, 1, 0, 3,
2209 	0, 1, 0, 2, 0, 1, 0, 4,
2210 	0, 1, 0, 2, 0, 1, 0, 3,
2211 	0, 1, 0, 2, 0, 1, 0, 5,
2212 	0, 1, 0, 2, 0, 1, 0, 3,
2213 	0, 1, 0, 2, 0, 1, 0, 4,
2214 	0, 1, 0, 2, 0, 1, 0, 3,
2215 	0, 1, 0, 2, 0, 1, 0, 6,
2216 	0, 1, 0, 2, 0, 1, 0, 3,
2217 	0, 1, 0, 2, 0, 1, 0, 4,
2218 	0, 1, 0, 2, 0, 1, 0, 3,
2219 	0, 1, 0, 2, 0, 1, 0, 5,
2220 	0, 1, 0, 2, 0, 1, 0, 3,
2221 	0, 1, 0, 2, 0, 1, 0, 4,
2222 	0, 1, 0, 2, 0, 1, 0, 3,
2223 	0, 1, 0, 2, 0, 1, 0, 8
2224 };
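
/*
 * The table above gives, for each byte of the OR'ed mapping arrays, the
 * number of consecutive 1-bits starting at bit 0; the all-ones byte is
 * special-cased in the slide loop below.  A sketch of the equivalent
 * computation (illustrative only, excluded from the build):
 */
#if 0
#include <stdint.h>

static int
count_trailing_ones(uint8_t val)
{
	int n = 0;

	while ((val & 0x01) && (n < 8)) {
		n++;
		val >>= 1;
	}
	/* e.g. 0x07 -> 3, 0x17 -> 3, 0xfe -> 0, 0xff -> 8 */
	return (n);
}
#endif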
2225 
2226 
2227 void
2228 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2229 {
2230 	/*
2231 	 * Now we also need to check the mapping array in a couple of ways.
2232 	 * 1) Did we move the cum-ack point?
2233 	 *
2234 	 * When you first glance at this you might think that all entries that
2235 	 * make up the position of the cum-ack would be in the nr-mapping
2236 	 * array only... i.e. things up to the cum-ack are always
2237 	 * deliverable. That's true with one exception: when it's a fragmented
2238 	 * message we may not deliver the data until some threshold (or all
2239 	 * of it) is in place. So we must OR the nr_mapping_array and
2240 	 * mapping_array to get a true picture of the cum-ack.
2241 	 */
2242 	struct sctp_association *asoc;
2243 	int at;
2244 	uint8_t val;
2245 	int slide_from, slide_end, lgap, distance;
2246 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2247 
2248 	asoc = &stcb->asoc;
2249 
2250 	old_cumack = asoc->cumulative_tsn;
2251 	old_base = asoc->mapping_array_base_tsn;
2252 	old_highest = asoc->highest_tsn_inside_map;
2253 	/*
2254 	 * We could probably improve this a small bit by calculating the
2255 	 * offset of the current cum-ack as the starting point.
2256 	 */
2257 	at = 0;
2258 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2259 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2260 		if (val == 0xff) {
2261 			at += 8;
2262 		} else {
2263 			/* there is a 0 bit */
2264 			at += sctp_map_lookup_tab[val];
2265 			break;
2266 		}
2267 	}
2268 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2269 
2270 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2271 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2272 #ifdef INVARIANTS
2273 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2274 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2275 #else
2276 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2277 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2278 		sctp_print_mapping_array(asoc);
2279 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2280 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2281 		}
2282 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2283 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2284 #endif
2285 	}
2286 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2287 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2288 	} else {
2289 		highest_tsn = asoc->highest_tsn_inside_map;
2290 	}
2291 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2292 		/* The complete array was completed by a single FR */
2293 		/* highest becomes the cum-ack */
2294 		int clr;
2295 
2296 #ifdef INVARIANTS
2297 		unsigned int i;
2298 
2299 #endif
2300 
2301 		/* clear the array */
2302 		clr = ((at + 7) >> 3);
2303 		if (clr > asoc->mapping_array_size) {
2304 			clr = asoc->mapping_array_size;
2305 		}
2306 		memset(asoc->mapping_array, 0, clr);
2307 		memset(asoc->nr_mapping_array, 0, clr);
2308 #ifdef INVARIANTS
2309 		for (i = 0; i < asoc->mapping_array_size; i++) {
2310 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2311 				printf("Error: mapping arrays not clean at clear\n");
2312 				sctp_print_mapping_array(asoc);
2313 			}
2314 		}
2315 #endif
2316 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2317 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2318 	} else if (at >= 8) {
2319 		/* we can slide the mapping array down */
2320 		/* slide_from holds where we hit the first NON 0xff byte */
2321 
2322 		/*
2323 		 * now calculate the ceiling of the move using our highest
2324 		 * TSN value
2325 		 */
2326 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2327 		slide_end = (lgap >> 3);
2328 		if (slide_end < slide_from) {
2329 			sctp_print_mapping_array(asoc);
2330 #ifdef INVARIANTS
2331 			panic("impossible slide");
2332 #else
2333 			printf("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2334 			    lgap, slide_end, slide_from, at);
2335 			return;
2336 #endif
2337 		}
2338 		if (slide_end > asoc->mapping_array_size) {
2339 #ifdef INVARIANTS
2340 			panic("would overrun buffer");
2341 #else
2342 			printf("Gak, would have overrun map end:%d slide_end:%d\n",
2343 			    asoc->mapping_array_size, slide_end);
2344 			slide_end = asoc->mapping_array_size;
2345 #endif
2346 		}
2347 		distance = (slide_end - slide_from) + 1;
2348 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2349 			sctp_log_map(old_base, old_cumack, old_highest,
2350 			    SCTP_MAP_PREPARE_SLIDE);
2351 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2352 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2353 		}
2354 		if (distance + slide_from > asoc->mapping_array_size ||
2355 		    distance < 0) {
2356 			/*
2357 			 * Here we do NOT slide forward the array so that
2358 			 * hopefully when more data comes in to fill it up
2359 			 * we will be able to slide it forward. Really I
2360 			 * don't think this should happen :-0
2361 			 */
2362 
2363 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2364 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2365 				    (uint32_t) asoc->mapping_array_size,
2366 				    SCTP_MAP_SLIDE_NONE);
2367 			}
2368 		} else {
2369 			int ii;
2370 
2371 			for (ii = 0; ii < distance; ii++) {
2372 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2373 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2374 
2375 			}
2376 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2377 				asoc->mapping_array[ii] = 0;
2378 				asoc->nr_mapping_array[ii] = 0;
2379 			}
2380 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2381 				asoc->highest_tsn_inside_map += (slide_from << 3);
2382 			}
2383 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2384 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2385 			}
2386 			asoc->mapping_array_base_tsn += (slide_from << 3);
2387 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2388 				sctp_log_map(asoc->mapping_array_base_tsn,
2389 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2390 				    SCTP_MAP_SLIDE_RESULT);
2391 			}
2392 		}
2393 	}
2394 }
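
/*
 * A condensed user-space sketch of the slide performed above: fully
 * acked leading bytes are shifted out of both maps and the base TSN
 * advances by 8 TSNs per byte slid over (illustrative only, excluded
 * from the build):
 */
#if 0
#include <stdint.h>
#include <string.h>

static void
slide_maps(uint8_t *map, uint8_t *nr_map, int map_size,
    int slide_from, int slide_end, uint32_t *base_tsn)
{
	int distance = (slide_end - slide_from) + 1;

	/* move the still-interesting bytes to the front */
	memmove(map, map + slide_from, distance);
	memmove(nr_map, nr_map + slide_from, distance);
	/* zero the tail that was slid over */
	memset(map + distance, 0, map_size - distance);
	memset(nr_map + distance, 0, map_size - distance);
	/* each byte slid past covers 8 TSNs */
	*base_tsn += (uint32_t)(slide_from << 3);
}
#endif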
2395 
2396 void
2397 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2398 {
2399 	struct sctp_association *asoc;
2400 	uint32_t highest_tsn;
2401 
2402 	asoc = &stcb->asoc;
2403 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2404 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2405 	} else {
2406 		highest_tsn = asoc->highest_tsn_inside_map;
2407 	}
2408 
2409 	/*
2410 	 * Now we need to see if we need to queue a sack or just start the
2411 	 * timer (if allowed).
2412 	 */
2413 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2414 		/*
2415 		 * Ok, special case: in the SHUTDOWN-SENT state we make
2416 		 * sure the SACK timer is off and instead send a SHUTDOWN and a
2417 		 * SACK
2418 		 */
2419 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2420 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2421 			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2422 		}
2423 		sctp_send_shutdown(stcb,
2424 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2425 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2426 	} else {
2427 		int is_a_gap;
2428 
2429 		/* is there a gap now ? */
2430 		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2431 
2432 		/*
2433 		 * CMT DAC algorithm: increase number of packets received
2434 		 * since last ack
2435 		 */
2436 		stcb->asoc.cmt_dac_pkts_rcvd++;
2437 
2438 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2439 							 * SACK */
2440 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2441 							 * longer is one */
2442 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2443 		    (is_a_gap) ||	/* is still a gap */
2444 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2445 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2446 		    ) {
2447 
2448 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2449 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2450 			    (stcb->asoc.send_sack == 0) &&
2451 			    (stcb->asoc.numduptsns == 0) &&
2452 			    (stcb->asoc.delayed_ack) &&
2453 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2454 
2455 				/*
2456 				 * CMT DAC algorithm: With CMT, delay acks
2457 				 * even in the face of reordering.
2458 				 *
2459 				 * Therefore, acks that do not have to be
2460 				 * sent because of the above reasons will be
2461 				 * delayed. That is, acks that would have
2462 				 * been sent due to gap reports will be
2463 				 * delayed with DAC. Start the delayed ack
2464 				 * timer.
2465 				 */
2466 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2467 				    stcb->sctp_ep, stcb, NULL);
2468 			} else {
2469 				/*
2470 				 * Ok we must build a SACK since the timer
2471 				 * is pending, we got our first packet OR
2472 				 * there are gaps or duplicates.
2473 				 */
2474 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2475 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2476 			}
2477 		} else {
2478 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2479 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2480 				    stcb->sctp_ep, stcb, NULL);
2481 			}
2482 		}
2483 	}
2484 }
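
/*
 * The SACK-now decision above boils down to a predicate; a sketch with
 * the association fields passed as plain parameters (illustrative only,
 * excluded from the build):
 */
#if 0
static int
must_sack_now(int send_sack, int was_a_gap, int is_a_gap,
    int numduptsns, int delayed_ack, int data_pkts_seen, int sack_freq)
{
	return (send_sack ||			/* a SACK was requested */
	    (was_a_gap && !is_a_gap) ||		/* a gap just closed */
	    numduptsns ||			/* duplicates to report */
	    is_a_gap ||				/* still a gap */
	    (delayed_ack == 0) ||		/* delayed SACK disabled */
	    (data_pkts_seen >= sack_freq));	/* packet limit reached */
}
#endif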
2485 
2486 void
2487 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2488 {
2489 	struct sctp_tmit_chunk *chk;
2490 	uint32_t tsize, pd_point;
2491 	uint16_t nxt_todel;
2492 
2493 	if (asoc->fragmented_delivery_inprogress) {
2494 		sctp_service_reassembly(stcb, asoc);
2495 	}
2496 	/* Can we proceed further, i.e. is the PD-API complete? */
2497 	if (asoc->fragmented_delivery_inprogress) {
2498 		/* no */
2499 		return;
2500 	}
2501 	/*
2502 	 * Now, is there some other chunk I can deliver from the reassembly
2503 	 * queue?
2504 	 */
2505 doit_again:
2506 	chk = TAILQ_FIRST(&asoc->reasmqueue);
2507 	if (chk == NULL) {
2508 		asoc->size_on_reasm_queue = 0;
2509 		asoc->cnt_on_reasm_queue = 0;
2510 		return;
2511 	}
2512 	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2513 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2514 	    ((nxt_todel == chk->rec.data.stream_seq) ||
2515 	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2516 		/*
2517 		 * Yep the first one is here. We set up to start reception
2518 		 * by backing down the TSN just in case we can't deliver.
2519 		 */
2520 
2521 		/*
2522 		 * Before we start, though, either all of the message should
2523 		 * be here or at least the partial delivery point's worth of
2524 		 * it, so that something can be delivered.
2525 		 */
2526 		if (stcb->sctp_socket) {
2527 			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
2528 			    stcb->sctp_ep->partial_delivery_point);
2529 		} else {
2530 			pd_point = stcb->sctp_ep->partial_delivery_point;
2531 		}
2532 		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2533 			asoc->fragmented_delivery_inprogress = 1;
2534 			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2535 			asoc->str_of_pdapi = chk->rec.data.stream_number;
2536 			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2537 			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2538 			asoc->fragment_flags = chk->rec.data.rcv_flags;
2539 			sctp_service_reassembly(stcb, asoc);
2540 			if (asoc->fragmented_delivery_inprogress == 0) {
2541 				goto doit_again;
2542 			}
2543 		}
2544 	}
2545 }
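
/*
 * A sketch of the partial delivery trigger used above: the PD-API is
 * started once the whole message is queued or at least pd_point bytes
 * of it are (illustrative only, excluded from the build):
 */
#if 0
#include <stdint.h>

static int
pd_api_should_start(int all_of_msg_queued, uint32_t queued_bytes,
    uint32_t so_rcv_limit, uint32_t partial_delivery_point)
{
	uint32_t pd_point;

	/* never demand more than the receive buffer can hold */
	pd_point = (so_rcv_limit < partial_delivery_point) ?
	    so_rcv_limit : partial_delivery_point;
	return (all_of_msg_queued || (queued_bytes >= pd_point));
}
#endif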
2546 
2547 int
2548 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2549     struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2550     struct sctp_nets *net, uint32_t * high_tsn)
2551 {
2552 	struct sctp_data_chunk *ch, chunk_buf;
2553 	struct sctp_association *asoc;
2554 	int num_chunks = 0;	/* number of data chunks processed */
2555 	int stop_proc = 0;
2556 	int chk_length, break_flag, last_chunk;
2557 	int abort_flag = 0, was_a_gap;
2558 	struct mbuf *m;
2559 	uint32_t highest_tsn;
2560 
2561 	/* set the rwnd */
2562 	sctp_set_rwnd(stcb, &stcb->asoc);
2563 
2564 	m = *mm;
2565 	SCTP_TCB_LOCK_ASSERT(stcb);
2566 	asoc = &stcb->asoc;
2567 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2568 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2569 	} else {
2570 		highest_tsn = asoc->highest_tsn_inside_map;
2571 	}
2572 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2573 	/*
2574 	 * Set up where we got the last DATA packet from for any SACK that
2575 	 * may need to go out. Don't bump the net. This is done ONLY when a
2576 	 * chunk is assigned.
2577 	 */
2578 	asoc->last_data_chunk_from = net;
2579 
2580 	/*-
2581 	 * Now before we proceed we must figure out if this is a wasted
2582 	 * cluster... i.e. it is a small packet sent in and yet the driver
2583 	 * underneath allocated a full cluster for it. If so we must copy it
2584 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2585 	 * with cluster starvation. Note for __Panda__ we don't do this
2586 	 * since it has clusters all the way down to 64 bytes.
2587 	 */
2588 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2589 		/* we only handle mbufs that are singletons.. not chains */
2590 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2591 		if (m) {
2592 			/* ok let's see if we can copy the data up */
2593 			caddr_t *from, *to;
2594 
2595 			/* get the pointers and copy */
2596 			to = mtod(m, caddr_t *);
2597 			from = mtod((*mm), caddr_t *);
2598 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2599 			/* copy the length and free up the old */
2600 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2601 			sctp_m_freem(*mm);
2602 			/* success, copy it back */
2603 			*mm = m;
2604 		} else {
2605 			/* We are in trouble in the mbuf world .. yikes */
2606 			m = *mm;
2607 		}
2608 	}
2609 	/* get pointer to the first chunk header */
2610 	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2611 	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2612 	if (ch == NULL) {
2613 		return (1);
2614 	}
2615 	/*
2616 	 * process all DATA chunks...
2617 	 */
2618 	*high_tsn = asoc->cumulative_tsn;
2619 	break_flag = 0;
2620 	asoc->data_pkts_seen++;
2621 	while (stop_proc == 0) {
2622 		/* validate chunk length */
2623 		chk_length = ntohs(ch->ch.chunk_length);
2624 		if (length - *offset < chk_length) {
2625 			/* all done, mutilated chunk */
2626 			stop_proc = 1;
2627 			continue;
2628 		}
2629 		if (ch->ch.chunk_type == SCTP_DATA) {
2630 			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2631 				/*
2632 				 * Need to send an abort since we had an
2633 				 * invalid data chunk.
2634 				 */
2635 				struct mbuf *op_err;
2636 
2637 				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2638 				    0, M_DONTWAIT, 1, MT_DATA);
2639 
2640 				if (op_err) {
2641 					struct sctp_paramhdr *ph;
2642 					uint32_t *ippp;
2643 
2644 					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2645 					    (2 * sizeof(uint32_t));
2646 					ph = mtod(op_err, struct sctp_paramhdr *);
2647 					ph->param_type =
2648 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2649 					ph->param_length = htons(SCTP_BUF_LEN(op_err));
2650 					ippp = (uint32_t *) (ph + 1);
2651 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2652 					ippp++;
2653 					*ippp = asoc->cumulative_tsn;
2654 
2655 				}
2656 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2657 				sctp_abort_association(inp, stcb, m, iphlen, sh,
2658 				    op_err, 0, net->port);
2659 				return (2);
2660 			}
2661 #ifdef SCTP_AUDITING_ENABLED
2662 			sctp_audit_log(0xB1, 0);
2663 #endif
2664 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2665 				last_chunk = 1;
2666 			} else {
2667 				last_chunk = 0;
2668 			}
2669 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2670 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2671 			    last_chunk)) {
2672 				num_chunks++;
2673 			}
2674 			if (abort_flag)
2675 				return (2);
2676 
2677 			if (break_flag) {
2678 				/*
2679 				 * Set because of out of rwnd space and no
2680 				 * drop rep space left.
2681 				 */
2682 				stop_proc = 1;
2683 				continue;
2684 			}
2685 		} else {
2686 			/* not a data chunk in the data region */
2687 			switch (ch->ch.chunk_type) {
2688 			case SCTP_INITIATION:
2689 			case SCTP_INITIATION_ACK:
2690 			case SCTP_SELECTIVE_ACK:
2691 			case SCTP_NR_SELECTIVE_ACK:
2692 			case SCTP_HEARTBEAT_REQUEST:
2693 			case SCTP_HEARTBEAT_ACK:
2694 			case SCTP_ABORT_ASSOCIATION:
2695 			case SCTP_SHUTDOWN:
2696 			case SCTP_SHUTDOWN_ACK:
2697 			case SCTP_OPERATION_ERROR:
2698 			case SCTP_COOKIE_ECHO:
2699 			case SCTP_COOKIE_ACK:
2700 			case SCTP_ECN_ECHO:
2701 			case SCTP_ECN_CWR:
2702 			case SCTP_SHUTDOWN_COMPLETE:
2703 			case SCTP_AUTHENTICATION:
2704 			case SCTP_ASCONF_ACK:
2705 			case SCTP_PACKET_DROPPED:
2706 			case SCTP_STREAM_RESET:
2707 			case SCTP_FORWARD_CUM_TSN:
2708 			case SCTP_ASCONF:
2709 				/*
2710 				 * Now, what do we do with KNOWN chunks that
2711 				 * are NOT in the right place?
2712 				 *
2713 				 * For now, I do nothing but ignore them. We
2714 				 * may later want to add sysctl stuff to
2715 				 * switch out and do either an ABORT() or
2716 				 * possibly process them.
2717 				 */
2718 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2719 					struct mbuf *op_err;
2720 
2721 					op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2722 					sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
2723 					return (2);
2724 				}
2725 				break;
2726 			default:
2727 				/* unknown chunk type, use bit rules */
2728 				if (ch->ch.chunk_type & 0x40) {
2729 					/* Add a error report to the queue */
2730 					struct mbuf *merr;
2731 					struct sctp_paramhdr *phd;
2732 
2733 					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2734 					if (merr) {
2735 						phd = mtod(merr, struct sctp_paramhdr *);
2736 						/*
2737 						 * We cheat and use param
2738 						 * type since we did not
2739 						 * bother to define an error
2740 						 * cause struct. They are
2741 						 * the same basic format
2742 						 * with different names.
2743 						 */
2744 						phd->param_type =
2745 						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
2746 						phd->param_length =
2747 						    htons(chk_length + sizeof(*phd));
2748 						SCTP_BUF_LEN(merr) = sizeof(*phd);
2749 						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
2750 						    SCTP_SIZE32(chk_length),
2751 						    M_DONTWAIT);
2752 						if (SCTP_BUF_NEXT(merr)) {
2753 							sctp_queue_op_err(stcb, merr);
2754 						} else {
2755 							sctp_m_freem(merr);
2756 						}
2757 					}
2758 				}
2759 				if ((ch->ch.chunk_type & 0x80) == 0) {
2760 					/* discard the rest of this packet */
2761 					stop_proc = 1;
2762 				}	/* else skip this bad chunk and
2763 					 * continue... */
2764 				break;
2765 			}	/* switch of chunk type */
2766 		}
2767 		*offset += SCTP_SIZE32(chk_length);
2768 		if ((*offset >= length) || stop_proc) {
2769 			/* no more data left in the mbuf chain */
2770 			stop_proc = 1;
2771 			continue;
2772 		}
2773 		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2774 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2775 		if (ch == NULL) {
2776 			*offset = length;
2777 			stop_proc = 1;
2778 			continue;
2779 		}
2780 	}
2781 	if (break_flag) {
2782 		/*
2783 		 * we need to report rwnd overrun drops.
2784 		 */
2785 		sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2786 	}
2787 	if (num_chunks) {
2788 		/*
2789 		 * Did we get data? If so, update the time for auto-close and
2790 		 * give peer credit for being alive.
2791 		 */
2792 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2793 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2794 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2795 			    stcb->asoc.overall_error_count,
2796 			    0,
2797 			    SCTP_FROM_SCTP_INDATA,
2798 			    __LINE__);
2799 		}
2800 		stcb->asoc.overall_error_count = 0;
2801 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2802 	}
2803 	/* now service all of the reassm queue if needed */
2804 	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2805 		sctp_service_queues(stcb, asoc);
2806 
2807 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2808 		/* Assure that we ack right away */
2809 		stcb->asoc.send_sack = 1;
2810 	}
2811 	/* Start a sack timer or QUEUE a SACK for sending */
2812 	sctp_sack_check(stcb, was_a_gap);
2813 	return (0);
2814 }
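
/*
 * The "bit rules" applied to unknown chunk types above follow from the
 * upper two bits of the chunk type (RFC 4960, Section 3.2).  A sketch
 * (illustrative only, excluded from the build):
 */
#if 0
#include <stdint.h>

static void
unknown_chunk_action(uint8_t chunk_type, int *report_err, int *keep_going)
{
	/* 0x40 set: report the chunk in an "Unrecognized Chunk Type" cause */
	*report_err = ((chunk_type & 0x40) != 0);
	/* 0x80 set: skip this chunk and continue; clear: stop processing */
	*keep_going = ((chunk_type & 0x80) != 0);
}
#endif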
2815 
2816 static int
2817 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2818     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2819     int *num_frs,
2820     uint32_t * biggest_newly_acked_tsn,
2821     uint32_t * this_sack_lowest_newack,
2822     int *rto_ok)
2823 {
2824 	struct sctp_tmit_chunk *tp1;
2825 	unsigned int theTSN;
2826 	int j, wake_him = 0, circled = 0;
2827 
2828 	/* Recover the tp1 we last saw */
2829 	tp1 = *p_tp1;
2830 	if (tp1 == NULL) {
2831 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2832 	}
2833 	for (j = frag_strt; j <= frag_end; j++) {
2834 		theTSN = j + last_tsn;
2835 		while (tp1) {
2836 			if (tp1->rec.data.doing_fast_retransmit)
2837 				(*num_frs) += 1;
2838 
2839 			/*-
2840 			 * CMT: CUCv2 algorithm. For each TSN being
2841 			 * processed from the sent queue, track the
2842 			 * next expected pseudo-cumack, or
2843 			 * rtx_pseudo_cumack, if required. Separate
2844 			 * cumack trackers for first transmissions,
2845 			 * and retransmissions.
2846 			 */
2847 			if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2848 			    (tp1->snd_count == 1)) {
2849 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2850 				tp1->whoTo->find_pseudo_cumack = 0;
2851 			}
2852 			if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2853 			    (tp1->snd_count > 1)) {
2854 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2855 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2856 			}
2857 			if (tp1->rec.data.TSN_seq == theTSN) {
2858 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2859 					/*-
2860 					 * must be held until
2861 					 * cum-ack passes
2862 					 */
2863 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2864 						/*-
2865 						 * If it is less than RESEND, it is
2866 						 * now no-longer in flight.
2867 						 * Higher values may already be set
2868 						 * via previous Gap Ack Blocks...
2869 						 * i.e. ACKED or RESEND.
2870 						 */
2871 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2872 						    *biggest_newly_acked_tsn)) {
2873 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2874 						}
2875 						/*-
2876 						 * CMT: SFR algo (and HTNA) - set
2877 						 * saw_newack to 1 for dest being
2878 						 * newly acked. update
2879 						 * this_sack_highest_newack if
2880 						 * appropriate.
2881 						 */
2882 						if (tp1->rec.data.chunk_was_revoked == 0)
2883 							tp1->whoTo->saw_newack = 1;
2884 
2885 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2886 						    tp1->whoTo->this_sack_highest_newack)) {
2887 							tp1->whoTo->this_sack_highest_newack =
2888 							    tp1->rec.data.TSN_seq;
2889 						}
2890 						/*-
2891 						 * CMT DAC algo: also update
2892 						 * this_sack_lowest_newack
2893 						 */
2894 						if (*this_sack_lowest_newack == 0) {
2895 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2896 								sctp_log_sack(*this_sack_lowest_newack,
2897 								    last_tsn,
2898 								    tp1->rec.data.TSN_seq,
2899 								    0,
2900 								    0,
2901 								    SCTP_LOG_TSN_ACKED);
2902 							}
2903 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2904 						}
2905 						/*-
2906 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2907 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2908 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2909 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2910 						 * Separate pseudo_cumack trackers for first transmissions and
2911 						 * retransmissions.
2912 						 */
2913 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2914 							if (tp1->rec.data.chunk_was_revoked == 0) {
2915 								tp1->whoTo->new_pseudo_cumack = 1;
2916 							}
2917 							tp1->whoTo->find_pseudo_cumack = 1;
2918 						}
2919 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2920 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2921 						}
2922 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2923 							if (tp1->rec.data.chunk_was_revoked == 0) {
2924 								tp1->whoTo->new_pseudo_cumack = 1;
2925 							}
2926 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2927 						}
2928 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2929 							sctp_log_sack(*biggest_newly_acked_tsn,
2930 							    last_tsn,
2931 							    tp1->rec.data.TSN_seq,
2932 							    frag_strt,
2933 							    frag_end,
2934 							    SCTP_LOG_TSN_ACKED);
2935 						}
2936 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2937 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2938 							    tp1->whoTo->flight_size,
2939 							    tp1->book_size,
2940 							    (uintptr_t) tp1->whoTo,
2941 							    tp1->rec.data.TSN_seq);
2942 						}
2943 						sctp_flight_size_decrease(tp1);
2944 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2945 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2946 							    tp1);
2947 						}
2948 						sctp_total_flight_decrease(stcb, tp1);
2949 
2950 						tp1->whoTo->net_ack += tp1->send_size;
2951 						if (tp1->snd_count < 2) {
2952 							/*-
2953 							 * True non-retransmitted chunk
2954 							 */
2955 							tp1->whoTo->net_ack2 += tp1->send_size;
2956 
2957 							/*-
2958 							 * update RTO too ?
2959 							 */
2960 							if (tp1->do_rtt) {
2961 								if (*rto_ok) {
2962 									tp1->whoTo->RTO =
2963 									    sctp_calculate_rto(stcb,
2964 									    &stcb->asoc,
2965 									    tp1->whoTo,
2966 									    &tp1->sent_rcv_time,
2967 									    sctp_align_safe_nocopy,
2968 									    SCTP_RTT_FROM_DATA);
2969 									*rto_ok = 0;
2970 								}
2971 								if (tp1->whoTo->rto_needed == 0) {
2972 									tp1->whoTo->rto_needed = 1;
2973 								}
2974 								tp1->do_rtt = 0;
2975 							}
2976 						}
2977 					}
2978 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2979 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2980 						    stcb->asoc.this_sack_highest_gap)) {
2981 							stcb->asoc.this_sack_highest_gap =
2982 							    tp1->rec.data.TSN_seq;
2983 						}
2984 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2985 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2986 #ifdef SCTP_AUDITING_ENABLED
2987 							sctp_audit_log(0xB2,
2988 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2989 #endif
2990 						}
2991 					}
2992 					/*-
2993 					 * All chunks NOT UNSENT fall through here and are marked
2994 					 * (leave PR-SCTP ones that are to skip alone though)
2995 					 */
2996 					if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
2997 						tp1->sent = SCTP_DATAGRAM_MARKED;
2998 
2999 					if (tp1->rec.data.chunk_was_revoked) {
3000 						/* deflate the cwnd */
3001 						tp1->whoTo->cwnd -= tp1->book_size;
3002 						tp1->rec.data.chunk_was_revoked = 0;
3003 					}
3004 					/* NR Sack code here */
3005 					if (nr_sacking) {
3006 						if (tp1->data) {
3007 							/*
3008 							 * sa_ignore
3009 							 * NO_NULL_CHK
3010 							 */
3011 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3012 							sctp_m_freem(tp1->data);
3013 							tp1->data = NULL;
3014 						}
3015 						wake_him++;
3016 					}
3017 				}
3018 				break;
3019 			}	/* if (tp1->TSN_seq == theTSN) */
3020 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
3021 				break;
3022 			}
3023 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3024 			if ((tp1 == NULL) && (circled == 0)) {
3025 				circled++;
3026 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3027 			}
3028 		}		/* end while (tp1) */
3029 		if (tp1 == NULL) {
3030 			circled = 0;
3031 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3032 		}
3033 		/* In case the fragments were not in order we must reset */
3034 	}			/* end for (j = fragStart */
3035 	*p_tp1 = tp1;
3036 	return (wake_him);	/* Return value only used for nr-sack */
3037 }
3038 
3039 
3040 static int
3041 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3042     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3043     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3044     int num_seg, int num_nr_seg, int *rto_ok)
3045 {
3046 	struct sctp_gap_ack_block *frag, block;
3047 	struct sctp_tmit_chunk *tp1;
3048 	int i;
3049 	int num_frs = 0;
3050 	int chunk_freed;
3051 	int non_revocable;
3052 	uint16_t frag_strt, frag_end, prev_frag_end;
3053 
3054 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3055 	prev_frag_end = 0;
3056 	chunk_freed = 0;
3057 
3058 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3059 		if (i == num_seg) {
3060 			prev_frag_end = 0;
3061 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3062 		}
3063 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3064 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3065 		*offset += sizeof(block);
3066 		if (frag == NULL) {
3067 			return (chunk_freed);
3068 		}
3069 		frag_strt = ntohs(frag->start);
3070 		frag_end = ntohs(frag->end);
3071 
3072 		if (frag_strt > frag_end) {
3073 			/* This gap report is malformed, skip it. */
3074 			continue;
3075 		}
3076 		if (frag_strt <= prev_frag_end) {
3077 			/* This gap report is not in order, so restart. */
3078 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3079 		}
3080 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3081 			*biggest_tsn_acked = last_tsn + frag_end;
3082 		}
3083 		if (i < num_seg) {
3084 			non_revocable = 0;
3085 		} else {
3086 			non_revocable = 1;
3087 		}
3088 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3089 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3090 		    this_sack_lowest_newack, rto_ok)) {
3091 			chunk_freed = 1;
3092 		}
3093 		prev_frag_end = frag_end;
3094 	}
3095 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3096 		if (num_frs)
3097 			sctp_log_fr(*biggest_tsn_acked,
3098 			    *biggest_newly_acked_tsn,
3099 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3100 	}
3101 	return (chunk_freed);
3102 }
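
/*
 * Gap ack block offsets parsed above are relative to the SACK's
 * cumulative TSN ack: with cum-ack 1000 and a block of start=2, end=4
 * the peer has received TSNs 1002..1004.  A worked sketch (illustrative
 * only, excluded from the build):
 */
#if 0
#include <stdint.h>

static void
gap_block_range(uint32_t cum_ack, uint16_t start, uint16_t end,
    uint32_t *first_tsn, uint32_t *last_tsn)
{
	*first_tsn = cum_ack + start;
	*last_tsn = cum_ack + end;
}
#endif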
3103 
3104 static void
3105 sctp_check_for_revoked(struct sctp_tcb *stcb,
3106     struct sctp_association *asoc, uint32_t cumack,
3107     uint32_t biggest_tsn_acked)
3108 {
3109 	struct sctp_tmit_chunk *tp1;
3110 	int tot_revoked = 0;
3111 
3112 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3113 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3114 			/*
3115 			 * ok this guy is either ACKED or MARKED. If it is
3116 			 * ACKED it has been previously acked but not this
3117 			 * time, i.e. revoked. If it is MARKED it was ACK'ed
3118 			 * again.
3119 			 */
3120 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3121 				break;
3122 			}
3123 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3124 				/* it has been revoked */
3125 				tp1->sent = SCTP_DATAGRAM_SENT;
3126 				tp1->rec.data.chunk_was_revoked = 1;
3127 				/*
3128 				 * We must add this stuff back in to assure
3129 				 * timers and such get started.
3130 				 */
3131 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3132 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3133 					    tp1->whoTo->flight_size,
3134 					    tp1->book_size,
3135 					    (uintptr_t) tp1->whoTo,
3136 					    tp1->rec.data.TSN_seq);
3137 				}
3138 				sctp_flight_size_increase(tp1);
3139 				sctp_total_flight_increase(stcb, tp1);
3140 				/*
3141 				 * We inflate the cwnd to compensate for our
3142 				 * artificial inflation of the flight_size.
3143 				 */
3144 				tp1->whoTo->cwnd += tp1->book_size;
3145 				tot_revoked++;
3146 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3147 					sctp_log_sack(asoc->last_acked_seq,
3148 					    cumack,
3149 					    tp1->rec.data.TSN_seq,
3150 					    0,
3151 					    0,
3152 					    SCTP_LOG_TSN_REVOKED);
3153 				}
3154 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3155 				/* it has been re-acked in this SACK */
3156 				tp1->sent = SCTP_DATAGRAM_ACKED;
3157 			}
3158 		}
3159 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3160 			break;
3161 	}
3162 }
3163 
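/*
 * Revocation, by example (a sketch): suppose TSN 5 was covered by a
 * gap-ack block in an earlier SACK (so it sits on the sent queue as
 * ACKED) and a new SACK arrives with cum-ack 4 whose blocks no longer
 * cover TSN 5. sctp_check_for_revoked() then moves TSN 5 back to SENT,
 * flags chunk_was_revoked, and puts its bytes back into the flight
 * size so the retransmission machinery sees it again.
 */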
3164 
3165 static void
3166 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3167     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3168 {
3169 	struct sctp_tmit_chunk *tp1;
3170 	int strike_flag = 0;
3171 	struct timeval now;
3172 	int tot_retrans = 0;
3173 	uint32_t sending_seq;
3174 	struct sctp_nets *net;
3175 	int num_dests_sacked = 0;
3176 
3177 	/*
3178 	 * select the sending_seq, this is either the next thing ready to be
3179 	 * sent but not transmitted, OR, the next seq we assign.
3180 	 */
3181 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3182 	if (tp1 == NULL) {
3183 		sending_seq = asoc->sending_seq;
3184 	} else {
3185 		sending_seq = tp1->rec.data.TSN_seq;
3186 	}
3187 
3188 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3189 	if ((asoc->sctp_cmt_on_off > 0) &&
3190 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3191 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3192 			if (net->saw_newack)
3193 				num_dests_sacked++;
3194 		}
3195 	}
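	/*
	 * A "mixed" SACK is one that newly acks data sent to more than
	 * one destination; the CMT DAC extra-strike cases below therefore
	 * require num_dests_sacked == 1.
	 */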
3196 	if (stcb->asoc.peer_supports_prsctp) {
3197 		(void)SCTP_GETTIME_TIMEVAL(&now);
3198 	}
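	/*
	 * Strike accounting: while tp1->sent is below
	 * SCTP_DATAGRAM_RESEND it doubles as a miss counter. Each
	 * qualifying SACK increments it, and on reaching
	 * SCTP_DATAGRAM_RESEND the chunk is marked for fast
	 * retransmission further down.
	 */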
3199 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3200 		strike_flag = 0;
3201 		if (tp1->no_fr_allowed) {
3202 			/* this one had a timeout or something */
3203 			continue;
3204 		}
3205 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3206 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3207 				sctp_log_fr(biggest_tsn_newly_acked,
3208 				    tp1->rec.data.TSN_seq,
3209 				    tp1->sent,
3210 				    SCTP_FR_LOG_CHECK_STRIKE);
3211 		}
3212 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3213 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3214 			/* done */
3215 			break;
3216 		}
3217 		if (stcb->asoc.peer_supports_prsctp) {
3218 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3219 				/* Is it expired? */
3220 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3221 					/* Yes so drop it */
3222 					if (tp1->data != NULL) {
3223 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3224 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3225 						    SCTP_SO_NOT_LOCKED);
3226 					}
3227 					continue;
3228 				}
3229 			}
3230 		}
3231 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3232 			/* we are beyond the tsn in the sack */
3233 			break;
3234 		}
3235 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3236 			/* either a RESEND, ACKED, or MARKED */
3237 			/* skip */
3238 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3239 				/* Continue striking FWD-TSN chunks */
3240 				tp1->rec.data.fwd_tsn_cnt++;
3241 			}
3242 			continue;
3243 		}
3244 		/*
3245 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3246 		 */
3247 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3248 			/*
3249 			 * No new acks were received for data sent to this
3250 			 * dest. Therefore, according to the SFR algo for
3251 			 * CMT, no data sent to this dest can be marked for
3252 			 * FR using this SACK.
3253 			 */
3254 			continue;
3255 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3256 		    tp1->whoTo->this_sack_highest_newack)) {
3257 			/*
3258 			 * CMT: New acks were received for data sent to
3259 			 * this dest. But no new acks were seen for data
3260 			 * sent after tp1. Therefore, according to the SFR
3261 			 * algo for CMT, tp1 cannot be marked for FR using
3262 			 * this SACK. This step covers part of the DAC algo
3263 			 * and the HTNA algo as well.
3264 			 */
3265 			continue;
3266 		}
3267 		/*
3268 		 * Here we check to see if we have already done a FR and,
3269 		 * if so, whether the biggest TSN we saw in the sack is
3270 		 * smaller than the recovery point. If so we don't strike
3271 		 * the tsn... otherwise we CAN strike the TSN.
3272 		 */
3273 		/*
3274 		 * @@@ JRI: Check for CMT if (accum_moved &&
3275 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3276 		 * 0)) {
3277 		 */
3278 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3279 			/*
3280 			 * Strike the TSN if in fast-recovery and cum-ack
3281 			 * moved.
3282 			 */
3283 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3284 				sctp_log_fr(biggest_tsn_newly_acked,
3285 				    tp1->rec.data.TSN_seq,
3286 				    tp1->sent,
3287 				    SCTP_FR_LOG_STRIKE_CHUNK);
3288 			}
3289 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3290 				tp1->sent++;
3291 			}
3292 			if ((asoc->sctp_cmt_on_off > 0) &&
3293 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3294 				/*
3295 				 * CMT DAC algorithm: If SACK flag is set to
3296 				 * 0, then lowest_newack test will not pass
3297 				 * because it would have been set to the
3298 				 * cumack earlier. If not already marked for
3299 				 * rtx, not a mixed sack, and if tp1 is
3300 				 * not between two sacked TSNs, then mark by
3301 				 * one more. NOTE that we are marking by one
3302 				 * additional time since the SACK DAC flag
3303 				 * indicates that two packets have been
3304 				 * received after this missing TSN.
3305 				 */
3306 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3307 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3308 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3309 						sctp_log_fr(16 + num_dests_sacked,
3310 						    tp1->rec.data.TSN_seq,
3311 						    tp1->sent,
3312 						    SCTP_FR_LOG_STRIKE_CHUNK);
3313 					}
3314 					tp1->sent++;
3315 				}
3316 			}
3317 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3318 		    (asoc->sctp_cmt_on_off == 0)) {
3319 			/*
3320 			 * For those that have done a FR we must take
3321 			 * special consideration if we strike. I.e. the
3322 			 * biggest_newly_acked must be higher than the
3323 			 * sending_seq at the time we did the FR.
3324 			 */
3325 			if (
3326 #ifdef SCTP_FR_TO_ALTERNATE
3327 			/*
3328 			 * If FR's go to new networks, then we must only do
3329 			 * this for singly homed asoc's. However if the FR's
3330 			 * go to the same network (Armando's work) then it's
3331 			 * ok to FR multiple times.
3332 			 */
3333 			    (asoc->numnets < 2)
3334 #else
3335 			    (1)
3336 #endif
3337 			    ) {
3338 
3339 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3340 				    tp1->rec.data.fast_retran_tsn)) {
3341 					/*
3342 					 * Strike the TSN, since this ack is
3343 					 * beyond where things were when we
3344 					 * did a FR.
3345 					 */
3346 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3347 						sctp_log_fr(biggest_tsn_newly_acked,
3348 						    tp1->rec.data.TSN_seq,
3349 						    tp1->sent,
3350 						    SCTP_FR_LOG_STRIKE_CHUNK);
3351 					}
3352 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3353 						tp1->sent++;
3354 					}
3355 					strike_flag = 1;
3356 					if ((asoc->sctp_cmt_on_off > 0) &&
3357 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3358 						/*
3359 						 * CMT DAC algorithm: If
3360 						 * SACK flag is set to 0,
3361 						 * then lowest_newack test
3362 						 * will not pass because it
3363 						 * would have been set to
3364 						 * the cumack earlier. If
3365 						 * not already marked for
3366 						 * rtx, not a mixed sack, and
3367 						 * if tp1 is not between two
3368 						 * sacked TSNs, then mark by
3369 						 * one more. NOTE that we
3370 						 * are marking by one
3371 						 * additional time since the
3372 						 * SACK DAC flag indicates
3373 						 * that two packets have
3374 						 * been received after this
3375 						 * missing TSN.
3376 						 */
3377 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3378 						    (num_dests_sacked == 1) &&
3379 						    SCTP_TSN_GT(this_sack_lowest_newack,
3380 						    tp1->rec.data.TSN_seq)) {
3381 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3382 								sctp_log_fr(32 + num_dests_sacked,
3383 								    tp1->rec.data.TSN_seq,
3384 								    tp1->sent,
3385 								    SCTP_FR_LOG_STRIKE_CHUNK);
3386 							}
3387 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3388 								tp1->sent++;
3389 							}
3390 						}
3391 					}
3392 				}
3393 			}
3394 			/*
3395 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3396 			 * algo covers HTNA.
3397 			 */
3398 		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3399 		    biggest_tsn_newly_acked)) {
3400 			/*
3401 			 * We don't strike these: this is the HTNA
3402 			 * algorithm, i.e. we don't strike if our TSN is
3403 			 * larger than the Highest TSN Newly Acked.
3404 			 */
3405 			;
3406 		} else {
3407 			/* Strike the TSN */
3408 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3409 				sctp_log_fr(biggest_tsn_newly_acked,
3410 				    tp1->rec.data.TSN_seq,
3411 				    tp1->sent,
3412 				    SCTP_FR_LOG_STRIKE_CHUNK);
3413 			}
3414 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3415 				tp1->sent++;
3416 			}
3417 			if ((asoc->sctp_cmt_on_off > 0) &&
3418 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3419 				/*
3420 				 * CMT DAC algorithm: If SACK flag is set to
3421 				 * 0, then lowest_newack test will not pass
3422 				 * cumack earlier. If not already marked for
3423 				 * rtx, not a mixed sack, and if tp1 is
3424 				 * rtx'd, If not a mixed sack and if tp1 is
3425 				 * not between two sacked TSNs, then mark by
3426 				 * one more. NOTE that we are marking by one
3427 				 * additional time since the SACK DAC flag
3428 				 * indicates that two packets have been
3429 				 * received after this missing TSN.
3430 				 */
3431 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3432 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3433 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3434 						sctp_log_fr(48 + num_dests_sacked,
3435 						    tp1->rec.data.TSN_seq,
3436 						    tp1->sent,
3437 						    SCTP_FR_LOG_STRIKE_CHUNK);
3438 					}
3439 					tp1->sent++;
3440 				}
3441 			}
3442 		}
3443 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3444 			struct sctp_nets *alt;
3445 
3446 			/* fix counts and things */
3447 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3448 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3449 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3450 				    tp1->book_size,
3451 				    (uintptr_t) tp1->whoTo,
3452 				    tp1->rec.data.TSN_seq);
3453 			}
3454 			if (tp1->whoTo) {
3455 				tp1->whoTo->net_ack++;
3456 				sctp_flight_size_decrease(tp1);
3457 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3458 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3459 					    tp1);
3460 				}
3461 			}
3462 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3463 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3464 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3465 			}
3466 			/* add back to the rwnd */
3467 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3468 
3469 			/* remove from the total flight */
3470 			sctp_total_flight_decrease(stcb, tp1);
3471 
3472 			if ((stcb->asoc.peer_supports_prsctp) &&
3473 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3474 				/*
3475 				 * Has it been retransmitted tv_sec times? -
3476 				 * we store the retran count there.
3477 				 */
3478 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3479 					/* Yes, so drop it */
3480 					if (tp1->data != NULL) {
3481 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3482 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3483 						    SCTP_SO_NOT_LOCKED);
3484 					}
3485 					/* Make sure to flag we had a FR */
3486 					tp1->whoTo->net_ack++;
3487 					continue;
3488 				}
3489 			}
3490 			/* printf("OK, we are now ready to FR this guy\n"); */
3491 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3492 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3493 				    0, SCTP_FR_MARKED);
3494 			}
3495 			if (strike_flag) {
3496 				/* This is a subsequent FR */
3497 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3498 			}
3499 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3500 			if (asoc->sctp_cmt_on_off > 0) {
3501 				/*
3502 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3503 				 * If CMT is being used, then pick dest with
3504 				 * largest ssthresh for any retransmission.
3505 				 */
3506 				tp1->no_fr_allowed = 1;
3507 				alt = tp1->whoTo;
3508 				/* sa_ignore NO_NULL_CHK */
3509 				if (asoc->sctp_cmt_pf > 0) {
3510 					/*
3511 					 * JRS 5/18/07 - If CMT PF is on,
3512 					 * use the PF version of
3513 					 * find_alt_net()
3514 					 */
3515 					alt = sctp_find_alternate_net(stcb, alt, 2);
3516 				} else {
3517 					/*
3518 					 * JRS 5/18/07 - If only CMT is on,
3519 					 * use the CMT version of
3520 					 * find_alt_net()
3521 					 */
3522 					/* sa_ignore NO_NULL_CHK */
3523 					alt = sctp_find_alternate_net(stcb, alt, 1);
3524 				}
3525 				if (alt == NULL) {
3526 					alt = tp1->whoTo;
3527 				}
3528 				/*
3529 				 * CUCv2: If a different dest is picked for
3530 				 * the retransmission, then new
3531 				 * (rtx-)pseudo_cumack needs to be tracked
3532 				 * for orig dest. Let CUCv2 track new (rtx-)
3533 				 * pseudo-cumack always.
3534 				 */
3535 				if (tp1->whoTo) {
3536 					tp1->whoTo->find_pseudo_cumack = 1;
3537 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3538 				}
3539 			} else {/* CMT is OFF */
3540 
3541 #ifdef SCTP_FR_TO_ALTERNATE
3542 				/* Can we find an alternate? */
3543 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3544 #else
3545 				/*
3546 				 * default behavior is to NOT retransmit
3547 				 * FR's to an alternate. Armando Caro's
3548 				 * paper details why.
3549 				 */
3550 				alt = tp1->whoTo;
3551 #endif
3552 			}
3553 
3554 			tp1->rec.data.doing_fast_retransmit = 1;
3555 			tot_retrans++;
3556 			/* mark the sending seq for possible subsequent FR's */
3557 			/*
3558 			 * printf("Marking TSN for FR new value %x\n",
3559 			 * (uint32_t)tpi->rec.data.TSN_seq);
3560 			 */
3561 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3562 				/*
3563 				 * If the send queue is empty then it's
3564 				 * the next sequence number that will be
3565 				 * assigned, so we subtract one from this to
3566 				 * get the one we last sent.
3567 				 */
3568 				tp1->rec.data.fast_retran_tsn = sending_seq;
3569 			} else {
3570 				/*
3571 				 * If there are chunks on the send queue
3572 				 * (unsent data that has made it from the
3573 				 * stream queues but not out the door), we
3574 				 * take the first one (which will have the
3575 				 * lowest TSN) and subtract one to get the
3576 				 * one we last sent.
3577 				 */
3578 				struct sctp_tmit_chunk *ttt;
3579 
3580 				ttt = TAILQ_FIRST(&asoc->send_queue);
3581 				tp1->rec.data.fast_retran_tsn =
3582 				    ttt->rec.data.TSN_seq;
3583 			}
3584 
3585 			if (tp1->do_rtt) {
3586 				/*
3587 				 * this guy had an RTO calculation pending on
3588 				 * it, cancel it
3589 				 */
3590 				if ((tp1->whoTo != NULL) &&
3591 				    (tp1->whoTo->rto_needed == 0)) {
3592 					tp1->whoTo->rto_needed = 1;
3593 				}
3594 				tp1->do_rtt = 0;
3595 			}
3596 			if (alt != tp1->whoTo) {
3597 				/* yes, there is an alternate. */
3598 				sctp_free_remote_addr(tp1->whoTo);
3599 				/* sa_ignore FREED_MEMORY */
3600 				tp1->whoTo = alt;
3601 				atomic_add_int(&alt->ref_count, 1);
3602 			}
3603 		}
3604 	}
3605 }
3606 
3607 struct sctp_tmit_chunk *
3608 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3609     struct sctp_association *asoc)
3610 {
3611 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3612 	struct timeval now;
3613 	int now_filled = 0;
3614 
3615 	if (asoc->peer_supports_prsctp == 0) {
3616 		return (NULL);
3617 	}
3618 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3619 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3620 		    tp1->sent != SCTP_DATAGRAM_RESEND) {
3621 			/* no chance to advance, out of here */
3622 			break;
3623 		}
3624 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3625 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3626 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3627 				    asoc->advanced_peer_ack_point,
3628 				    tp1->rec.data.TSN_seq, 0, 0);
3629 			}
3630 		}
3631 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3632 			/*
3633 			 * We can't fwd-tsn past any that are reliable aka
3634 			 * retransmitted until the asoc fails.
3635 			 */
3636 			break;
3637 		}
3638 		if (!now_filled) {
3639 			(void)SCTP_GETTIME_TIMEVAL(&now);
3640 			now_filled = 1;
3641 		}
3642 		/*
3643 		 * Now we have a chunk which is marked for another
3644 		 * retransmission to a PR-stream but has either run out of its
3645 		 * chances already or has been marked to skip now. Can we skip
3646 		 * it if it's a resend?
3647 		 */
3648 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3649 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3650 			/*
3651 			 * Now is this one marked for resend and its time is
3652 			 * now up?
3653 			 */
3654 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3655 				/* Yes so drop it */
3656 				if (tp1->data) {
3657 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3658 					    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3659 					    SCTP_SO_NOT_LOCKED);
3660 				}
3661 			} else {
3662 				/*
3663 				 * No, we are done when we hit one for resend
3664 				 * whose time has not expired.
3665 				 */
3666 				break;
3667 			}
3668 		}
3669 		/*
3670 		 * Ok now if this chunk is marked to drop it we can clean up
3671 		 * the chunk, advance our peer ack point and we can check
3672 		 * the next chunk.
3673 		 */
3674 		if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3675 			/* advancedPeerAckPoint goes forward */
3676 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3677 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3678 				a_adv = tp1;
3679 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3680 				/* No update but we do save the chk */
3681 				a_adv = tp1;
3682 			}
3683 		} else {
3684 			/*
3685 			 * If it is still in RESEND we can advance no
3686 			 * further
3687 			 */
3688 			break;
3689 		}
3690 	}
3691 	return (a_adv);
3692 }
3693 
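/*
 * PR-SCTP, by example (a sketch): with cum-ack 10 and abandoned chunks
 * 11-13 sitting at SCTP_FORWARD_TSN_SKIP, the walk above advances
 * advanced_peer_ack_point to 13 and returns that chunk; the caller then
 * emits a FORWARD-TSN so the peer moves its cumulative ack past the
 * abandoned data (RFC 3758 semantics).
 */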
3694 static int
3695 sctp_fs_audit(struct sctp_association *asoc)
3696 {
3697 	struct sctp_tmit_chunk *chk;
3698 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3699 	int entry_flight, entry_cnt, ret;
3700 
3701 	entry_flight = asoc->total_flight;
3702 	entry_cnt = asoc->total_flight_count;
3703 	ret = 0;
3704 
3705 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3706 		return (0);
3707 
3708 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3709 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3710 			printf("Chk TSN:%u size:%d inflight cnt:%d\n",
3711 			    chk->rec.data.TSN_seq,
3712 			    chk->send_size,
3713 			    chk->snd_count
3714 			    );
3715 			inflight++;
3716 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3717 			resend++;
3718 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3719 			inbetween++;
3720 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3721 			above++;
3722 		} else {
3723 			acked++;
3724 		}
3725 	}
3726 
3727 	if ((inflight > 0) || (inbetween > 0)) {
3728 #ifdef INVARIANTS
3729 		panic("Flight size-express incorrect? \n");
3730 #else
3731 		printf("asoc->total_flight:%d cnt:%d\n",
3732 		    entry_flight, entry_cnt);
3733 
3734 		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3735 		    inflight, inbetween, resend, above, acked);
3736 		ret = 1;
3737 #endif
3738 	}
3739 	return (ret);
3740 }
3741 
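/*
 * The audit above checks the invariant that once the sent queue holds
 * only resend, acked, or PR-SCTP-skipped chunks, nothing should still
 * be counted as in flight; callers use a non-zero return to rebuild
 * the flight-size bookkeeping from the queue contents.
 */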
3742 
3743 static void
3744 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3745     struct sctp_association *asoc,
3746     struct sctp_tmit_chunk *tp1)
3747 {
3748 	tp1->window_probe = 0;
3749 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3750 		/* TSN's skipped; we do NOT move back. */
3751 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3752 		    tp1->whoTo->flight_size,
3753 		    tp1->book_size,
3754 		    (uintptr_t) tp1->whoTo,
3755 		    tp1->rec.data.TSN_seq);
3756 		return;
3757 	}
3758 	/* First setup this by shrinking flight */
3759 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3760 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3761 		    tp1);
3762 	}
3763 	sctp_flight_size_decrease(tp1);
3764 	sctp_total_flight_decrease(stcb, tp1);
3765 	/* Now mark for resend */
3766 	tp1->sent = SCTP_DATAGRAM_RESEND;
3767 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3768 
3769 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3770 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3771 		    tp1->whoTo->flight_size,
3772 		    tp1->book_size,
3773 		    (uintptr_t) tp1->whoTo,
3774 		    tp1->rec.data.TSN_seq);
3775 	}
3776 }
3777 
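/*
 * A window probe is a single chunk sent into a zero peer rwnd to keep
 * the association from deadlocking. When the SACK processing below
 * sees the window reopen, it calls sctp_window_probe_recovery() to
 * pull the probe chunk out of flight and mark it for retransmission
 * on the normal path.
 */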
3778 void
3779 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3780     uint32_t rwnd, int *abort_now, int ecne_seen)
3781 {
3782 	struct sctp_nets *net;
3783 	struct sctp_association *asoc;
3784 	struct sctp_tmit_chunk *tp1, *tp2;
3785 	uint32_t old_rwnd;
3786 	int win_probe_recovery = 0;
3787 	int win_probe_recovered = 0;
3788 	int j, done_once = 0;
3789 	int rto_ok = 1;
3790 
3791 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3792 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3793 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3794 	}
3795 	SCTP_TCB_LOCK_ASSERT(stcb);
3796 #ifdef SCTP_ASOCLOG_OF_TSNS
3797 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3798 	stcb->asoc.cumack_log_at++;
3799 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3800 		stcb->asoc.cumack_log_at = 0;
3801 	}
3802 #endif
3803 	asoc = &stcb->asoc;
3804 	old_rwnd = asoc->peers_rwnd;
3805 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3806 		/* old ack */
3807 		return;
3808 	} else if (asoc->last_acked_seq == cumack) {
3809 		/* Window update sack */
3810 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3811 		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3812 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3813 			/* SWS sender side engages */
3814 			asoc->peers_rwnd = 0;
3815 		}
3816 		if (asoc->peers_rwnd > old_rwnd) {
3817 			goto again;
3818 		}
3819 		return;
3820 	}
3821 	/* First setup for CC stuff */
3822 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3823 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3824 			/* Drag along the window_tsn for cwr's */
3825 			net->cwr_window_tsn = cumack;
3826 		}
3827 		net->prev_cwnd = net->cwnd;
3828 		net->net_ack = 0;
3829 		net->net_ack2 = 0;
3830 
3831 		/*
3832 		 * CMT: Reset CUC and Fast recovery algo variables before
3833 		 * SACK processing
3834 		 */
3835 		net->new_pseudo_cumack = 0;
3836 		net->will_exit_fast_recovery = 0;
3837 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3838 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3839 		}
3840 	}
3841 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3842 		uint32_t send_s;
3843 
3844 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3845 			tp1 = TAILQ_LAST(&asoc->sent_queue,
3846 			    sctpchunk_listhead);
3847 			send_s = tp1->rec.data.TSN_seq + 1;
3848 		} else {
3849 			send_s = asoc->sending_seq;
3850 		}
3851 		if (SCTP_TSN_GE(cumack, send_s)) {
3852 #ifndef INVARIANTS
3853 			struct mbuf *oper;
3854 
3855 #endif
3856 #ifdef INVARIANTS
3857 			panic("Impossible sack 1");
3858 #else
3859 
3860 			*abort_now = 1;
3861 			/* XXX */
3862 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3863 			    0, M_DONTWAIT, 1, MT_DATA);
3864 			if (oper) {
3865 				struct sctp_paramhdr *ph;
3866 				uint32_t *ippp;
3867 
3868 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3869 				    sizeof(uint32_t);
3870 				ph = mtod(oper, struct sctp_paramhdr *);
3871 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3872 				ph->param_length = htons(SCTP_BUF_LEN(oper));
3873 				ippp = (uint32_t *) (ph + 1);
3874 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3875 			}
3876 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3877 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
3878 			return;
3879 #endif
3880 		}
3881 	}
3882 	asoc->this_sack_highest_gap = cumack;
3883 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3884 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3885 		    stcb->asoc.overall_error_count,
3886 		    0,
3887 		    SCTP_FROM_SCTP_INDATA,
3888 		    __LINE__);
3889 	}
3890 	stcb->asoc.overall_error_count = 0;
3891 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3892 		/* process the new consecutive TSN first */
3893 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3894 			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3895 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3896 					printf("Warning, an unsent is now acked?\n");
3897 				}
3898 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3899 					/*
3900 					 * If it is less than ACKED, it is
3901 					 * now no longer in flight. Higher
3902 					 * values may occur during marking
3903 					 */
3904 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3905 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3906 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3907 							    tp1->whoTo->flight_size,
3908 							    tp1->book_size,
3909 							    (uintptr_t) tp1->whoTo,
3910 							    tp1->rec.data.TSN_seq);
3911 						}
3912 						sctp_flight_size_decrease(tp1);
3913 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3914 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3915 							    tp1);
3916 						}
3917 						/* sa_ignore NO_NULL_CHK */
3918 						sctp_total_flight_decrease(stcb, tp1);
3919 					}
3920 					tp1->whoTo->net_ack += tp1->send_size;
3921 					if (tp1->snd_count < 2) {
3922 						/*
3923 						 * True non-retransmitted
3924 						 * chunk
3925 						 */
3926 						tp1->whoTo->net_ack2 +=
3927 						    tp1->send_size;
3928 
3929 						/* update RTO too? */
3930 						if (tp1->do_rtt) {
3931 							if (rto_ok) {
3932 								tp1->whoTo->RTO =
3933 								/* sa_ignore NO_NULL_CHK */
3938 								    sctp_calculate_rto(stcb,
3939 								    asoc, tp1->whoTo,
3940 								    &tp1->sent_rcv_time,
3941 								    sctp_align_safe_nocopy,
3942 								    SCTP_RTT_FROM_DATA);
3943 								rto_ok = 0;
3944 							}
3945 							if (tp1->whoTo->rto_needed == 0) {
3946 								tp1->whoTo->rto_needed = 1;
3947 							}
3948 							tp1->do_rtt = 0;
3949 						}
3950 					}
3951 					/*
3952 					 * CMT: CUCv2 algorithm. From the
3953 					 * cumack'd TSNs, for each TSN being
3954 					 * acked for the first time, set the
3955 					 * following variables for the
3956 					 * corresp destination.
3957 					 * new_pseudo_cumack will trigger a
3958 					 * cwnd update.
3959 					 * find_(rtx_)pseudo_cumack will
3960 					 * trigger search for the next
3961 					 * expected (rtx-)pseudo-cumack.
3962 					 */
3963 					tp1->whoTo->new_pseudo_cumack = 1;
3964 					tp1->whoTo->find_pseudo_cumack = 1;
3965 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3966 
3967 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3968 						/* sa_ignore NO_NULL_CHK */
3969 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3970 					}
3971 				}
3972 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3973 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3974 				}
3975 				if (tp1->rec.data.chunk_was_revoked) {
3976 					/* deflate the cwnd */
3977 					tp1->whoTo->cwnd -= tp1->book_size;
3978 					tp1->rec.data.chunk_was_revoked = 0;
3979 				}
3980 				tp1->sent = SCTP_DATAGRAM_ACKED;
3981 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3982 				if (tp1->data) {
3983 					/* sa_ignore NO_NULL_CHK */
3984 					sctp_free_bufspace(stcb, asoc, tp1, 1);
3985 					sctp_m_freem(tp1->data);
3986 					tp1->data = NULL;
3987 				}
3988 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3989 					sctp_log_sack(asoc->last_acked_seq,
3990 					    cumack,
3991 					    tp1->rec.data.TSN_seq,
3992 					    0,
3993 					    0,
3994 					    SCTP_LOG_FREE_SENT);
3995 				}
3996 				asoc->sent_queue_cnt--;
3997 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3998 			} else {
3999 				break;
4000 			}
4001 		}
4002 
4003 	}
4004 	/* sa_ignore NO_NULL_CHK */
4005 	if (stcb->sctp_socket) {
4006 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4007 		struct socket *so;
4008 
4009 #endif
4010 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4011 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4012 			/* sa_ignore NO_NULL_CHK */
4013 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4014 		}
4015 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4016 		so = SCTP_INP_SO(stcb->sctp_ep);
4017 		atomic_add_int(&stcb->asoc.refcnt, 1);
4018 		SCTP_TCB_UNLOCK(stcb);
4019 		SCTP_SOCKET_LOCK(so, 1);
4020 		SCTP_TCB_LOCK(stcb);
4021 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4022 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4023 			/* assoc was freed while we were unlocked */
4024 			SCTP_SOCKET_UNLOCK(so, 1);
4025 			return;
4026 		}
4027 #endif
4028 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4029 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4030 		SCTP_SOCKET_UNLOCK(so, 1);
4031 #endif
4032 	} else {
4033 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4034 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4035 		}
4036 	}
4037 
4038 	/* JRS - Use the congestion control given in the CC module */
4039 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4040 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4041 			if (net->net_ack2 > 0) {
4042 				/*
4043 				 * Karn's rule applies to clearing error
4044 				 * count; this is optional.
4045 				 */
4046 				net->error_count = 0;
4047 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4048 					/* addr came good */
4049 					net->dest_state |= SCTP_ADDR_REACHABLE;
4050 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4051 					    SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED);
4052 				}
4053 				if (net == stcb->asoc.primary_destination) {
4054 					if (stcb->asoc.alternate) {
4055 						/*
4056 						 * release the alternate,
4057 						 * primary is good
4058 						 */
4059 						sctp_free_remote_addr(stcb->asoc.alternate);
4060 						stcb->asoc.alternate = NULL;
4061 					}
4062 				}
4063 				if (net->dest_state & SCTP_ADDR_PF) {
4064 					net->dest_state &= ~SCTP_ADDR_PF;
4065 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4066 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4067 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4068 					/* Done with this net */
4069 					net->net_ack = 0;
4070 				}
4071 				/* restore any doubled timers */
4072 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4073 				if (net->RTO < stcb->asoc.minrto) {
4074 					net->RTO = stcb->asoc.minrto;
4075 				}
4076 				if (net->RTO > stcb->asoc.maxrto) {
4077 					net->RTO = stcb->asoc.maxrto;
4078 				}
4079 			}
4080 		}
4081 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4082 	}
4083 	asoc->last_acked_seq = cumack;
4084 
4085 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4086 		/* nothing left in-flight */
4087 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4088 			net->flight_size = 0;
4089 			net->partial_bytes_acked = 0;
4090 		}
4091 		asoc->total_flight = 0;
4092 		asoc->total_flight_count = 0;
4093 	}
4094 	/* RWND update */
4095 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4096 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4097 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4098 		/* SWS sender side engages */
4099 		asoc->peers_rwnd = 0;
4100 	}
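	/*
	 * A sketch of the arithmetic above: the usable peer rwnd is the
	 * advertised rwnd minus the bytes in flight plus a per-chunk
	 * overhead allowance, clamped at zero by sctp_sbspace_sub().
	 * E.g. rwnd 4200 with 3500 bytes in flight over 2 chunks and
	 * sctp_peer_chunk_oh 256 leaves 4200 - (3500 + 2 * 256) = 188;
	 * if that falls under the configured SWS threshold we treat the
	 * window as closed rather than dribble tiny packets into it.
	 */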
4101 	if (asoc->peers_rwnd > old_rwnd) {
4102 		win_probe_recovery = 1;
4103 	}
4104 	/* Now assure a timer where data is queued at */
4105 again:
4106 	j = 0;
4107 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4108 		int to_ticks;
4109 
4110 		if (win_probe_recovery && (net->window_probe)) {
4111 			win_probe_recovered = 1;
4112 			/*
4113 			 * Find the first chunk that was used with a window
4114 			 * probe and clear its sent state
4115 			 */
4116 			/* sa_ignore FREED_MEMORY */
4117 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4118 				if (tp1->window_probe) {
4119 					/* move back to data send queue */
4120 					sctp_window_probe_recovery(stcb, asoc, tp1);
4121 					break;
4122 				}
4123 			}
4124 		}
4125 		if (net->RTO == 0) {
4126 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4127 		} else {
4128 			to_ticks = MSEC_TO_TICKS(net->RTO);
4129 		}
4130 		if (net->flight_size) {
4131 			j++;
4132 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4133 			    sctp_timeout_handler, &net->rxt_timer);
4134 			if (net->window_probe) {
4135 				net->window_probe = 0;
4136 			}
4137 		} else {
4138 			if (net->window_probe) {
4139 				/*
4140 				 * In window probes we must assure a timer
4141 				 * is still running there
4142 				 */
4143 				net->window_probe = 0;
4144 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4145 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4146 					    sctp_timeout_handler, &net->rxt_timer);
4147 				}
4148 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4149 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4150 				    stcb, net,
4151 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4152 			}
4153 		}
4154 	}
4155 	if ((j == 0) &&
4156 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4157 	    (asoc->sent_queue_retran_cnt == 0) &&
4158 	    (win_probe_recovered == 0) &&
4159 	    (done_once == 0)) {
4160 		/*
4161 		 * huh, this should not happen unless all packets are
4162 		 * PR-SCTP and marked to skip of course.
4163 		 */
4164 		if (sctp_fs_audit(asoc)) {
4165 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4166 				net->flight_size = 0;
4167 			}
4168 			asoc->total_flight = 0;
4169 			asoc->total_flight_count = 0;
4170 			asoc->sent_queue_retran_cnt = 0;
4171 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4172 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4173 					sctp_flight_size_increase(tp1);
4174 					sctp_total_flight_increase(stcb, tp1);
4175 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4176 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4177 				}
4178 			}
4179 		}
4180 		done_once = 1;
4181 		goto again;
4182 	}
4183 	/**********************************/
4184 	/* Now what about shutdown issues */
4185 	/**********************************/
4186 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4187 		/* nothing left on sendqueue.. consider done */
4188 		/* clean up */
4189 		if ((asoc->stream_queue_cnt == 1) &&
4190 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4191 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4192 		    (asoc->locked_on_sending)
4193 		    ) {
4194 			struct sctp_stream_queue_pending *sp;
4195 
4196 			/*
4197 			 * I may be in a state where we got all across.. but
4198 			 * cannot write more due to a shutdown... we abort
4199 			 * since the user did not indicate EOR in this case.
4200 			 * The sp will be cleaned during free of the asoc.
4201 			 */
4202 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4203 			    sctp_streamhead);
4204 			if ((sp) && (sp->length == 0)) {
4205 				/* Let cleanup code purge it */
4206 				if (sp->msg_is_complete) {
4207 					asoc->stream_queue_cnt--;
4208 				} else {
4209 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4210 					asoc->locked_on_sending = NULL;
4211 					asoc->stream_queue_cnt--;
4212 				}
4213 			}
4214 		}
4215 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4216 		    (asoc->stream_queue_cnt == 0)) {
4217 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4218 				/* Need to abort here */
4219 				struct mbuf *oper;
4220 
4221 		abort_out_now:
4222 				*abort_now = 1;
4223 				/* XXX */
4224 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4225 				    0, M_DONTWAIT, 1, MT_DATA);
4226 				if (oper) {
4227 					struct sctp_paramhdr *ph;
4228 					uint32_t *ippp;
4229 
4230 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4231 					    sizeof(uint32_t);
4232 					ph = mtod(oper, struct sctp_paramhdr *);
4233 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4234 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4235 					ippp = (uint32_t *) (ph + 1);
4236 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4237 				}
4238 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4239 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4240 			} else {
4241 				struct sctp_nets *netp;
4242 
4243 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4244 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4245 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4246 				}
4247 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4248 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4249 				sctp_stop_timers_for_shutdown(stcb);
4250 				if (asoc->alternate) {
4251 					netp = asoc->alternate;
4252 				} else {
4253 					netp = asoc->primary_destination;
4254 				}
4255 				sctp_send_shutdown(stcb, netp);
4256 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4257 				    stcb->sctp_ep, stcb, netp);
4258 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4259 				    stcb->sctp_ep, stcb, netp);
4260 			}
4261 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4262 		    (asoc->stream_queue_cnt == 0)) {
4263 			struct sctp_nets *netp;
4264 
4265 			if (asoc->alternate) {
4266 				netp = asoc->alternate;
4267 			} else {
4268 				netp = asoc->primary_destination;
4269 			}
4270 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4271 				goto abort_out_now;
4272 			}
4273 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4274 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4275 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4276 			sctp_send_shutdown_ack(stcb, netp);
4277 			sctp_stop_timers_for_shutdown(stcb);
4278 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4279 			    stcb->sctp_ep, stcb, netp);
4280 		}
4281 	}
4282 	/*********************************************/
4283 	/* Here we perform PR-SCTP procedures        */
4284 	/* (section 4.2)                             */
4285 	/*********************************************/
4286 	/* C1. update advancedPeerAckPoint */
4287 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4288 		asoc->advanced_peer_ack_point = cumack;
4289 	}
4290 	/* PR-Sctp issues need to be addressed too */
4291 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4292 		struct sctp_tmit_chunk *lchk;
4293 		uint32_t old_adv_peer_ack_point;
4294 
4295 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4296 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4297 		/* C3. See if we need to send a Fwd-TSN */
4298 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4299 			/*
4300 			 * ISSUE with ECN, see FWD-TSN processing.
4301 			 */
4302 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4303 				send_forward_tsn(stcb, asoc);
4304 			} else if (lchk) {
4305 				/* try to FR fwd-tsn's that get lost too */
4306 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4307 					send_forward_tsn(stcb, asoc);
4308 				}
4309 			}
4310 		}
4311 		if (lchk) {
4312 			/* Assure a timer is up */
4313 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4314 			    stcb->sctp_ep, stcb, lchk->whoTo);
4315 		}
4316 	}
4317 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4318 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4319 		    rwnd,
4320 		    stcb->asoc.peers_rwnd,
4321 		    stcb->asoc.total_flight,
4322 		    stcb->asoc.total_output_queue_size);
4323 	}
4324 }
4325 
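/*
 * sctp_express_handle_sack() above is the fast path for SACKs carrying
 * only a cumulative ack (no gap, nr-gap, or duplicate reports);
 * sctp_handle_sack() below is the slow path that additionally walks the
 * gap-ack blocks, strikes missing chunks, and runs revocation checks.
 */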
4326 void
4327 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4328     struct sctp_tcb *stcb,
4329     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4330     int *abort_now, uint8_t flags,
4331     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4332 {
4333 	struct sctp_association *asoc;
4334 	struct sctp_tmit_chunk *tp1, *tp2;
4335 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4336 	uint16_t wake_him = 0;
4337 	uint32_t send_s = 0;
4338 	long j;
4339 	int accum_moved = 0;
4340 	int will_exit_fast_recovery = 0;
4341 	uint32_t a_rwnd, old_rwnd;
4342 	int win_probe_recovery = 0;
4343 	int win_probe_recovered = 0;
4344 	struct sctp_nets *net = NULL;
4345 	int done_once;
4346 	int rto_ok = 1;
4347 	uint8_t reneged_all = 0;
4348 	uint8_t cmt_dac_flag;
4349 
4350 	/*
4351 	 * we take any chance we can to service our queues since we cannot
4352 	 * get awoken when the socket is read from :<
4353 	 */
4354 	/*
4355 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4356 	 * old sack, if so discard. 2) If there is nothing left in the send
4357 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4358 	 * too, update any rwnd change and verify no timers are running.
4359 	 * then return. 3) Process any new consecutive data i.e. cum-ack
4360 	 * moved process these first and note that it moved. 4) Process any
4361 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4362 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4363 	 * sync up flightsizes and things, stop all timers and also check
4364 	 * for shutdown_pending state. If so then go ahead and send off the
4365 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4366 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4367 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4368 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4369 	 * if in shutdown_recv state.
4370 	 */
4371 	SCTP_TCB_LOCK_ASSERT(stcb);
4372 	/* CMT DAC algo */
4373 	this_sack_lowest_newack = 0;
4374 	SCTP_STAT_INCR(sctps_slowpath_sack);
4375 	last_tsn = cum_ack;
4376 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4377 #ifdef SCTP_ASOCLOG_OF_TSNS
4378 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4379 	stcb->asoc.cumack_log_at++;
4380 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4381 		stcb->asoc.cumack_log_at = 0;
4382 	}
4383 #endif
4384 	a_rwnd = rwnd;
4385 
4386 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4387 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4388 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4389 	}
4390 	old_rwnd = stcb->asoc.peers_rwnd;
4391 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4392 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4393 		    stcb->asoc.overall_error_count,
4394 		    0,
4395 		    SCTP_FROM_SCTP_INDATA,
4396 		    __LINE__);
4397 	}
4398 	stcb->asoc.overall_error_count = 0;
4399 	asoc = &stcb->asoc;
4400 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4401 		sctp_log_sack(asoc->last_acked_seq,
4402 		    cum_ack,
4403 		    0,
4404 		    num_seg,
4405 		    num_dup,
4406 		    SCTP_LOG_NEW_SACK);
4407 	}
4408 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4409 		uint16_t i;
4410 		uint32_t *dupdata, dblock;
4411 
4412 		for (i = 0; i < num_dup; i++) {
4413 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4414 			    sizeof(uint32_t), (uint8_t *) & dblock);
4415 			if (dupdata == NULL) {
4416 				break;
4417 			}
4418 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4419 		}
4420 	}
4421 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4422 		/* reality check */
4423 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4424 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4425 			    sctpchunk_listhead);
4426 			send_s = tp1->rec.data.TSN_seq + 1;
4427 		} else {
4428 			tp1 = NULL;
4429 			send_s = asoc->sending_seq;
4430 		}
4431 		if (SCTP_TSN_GE(cum_ack, send_s)) {
4432 			struct mbuf *oper;
4433 
4434 			/*
4435 			 * no way, we have not even sent this TSN out yet.
4436 			 * Peer is hopelessly messed up with us.
4437 			 */
4438 			printf("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4439 			    cum_ack, send_s);
4440 			if (tp1) {
4441 				printf("Got send_s from tsn:%x + 1 of tp1:%p\n",
4442 				    tp1->rec.data.TSN_seq, tp1);
4443 			}
4444 	hopeless_peer:
4445 			*abort_now = 1;
4446 			/* XXX */
4447 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4448 			    0, M_DONTWAIT, 1, MT_DATA);
4449 			if (oper) {
4450 				struct sctp_paramhdr *ph;
4451 				uint32_t *ippp;
4452 
4453 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4454 				    sizeof(uint32_t);
4455 				ph = mtod(oper, struct sctp_paramhdr *);
4456 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4457 				ph->param_length = htons(SCTP_BUF_LEN(oper));
4458 				ippp = (uint32_t *) (ph + 1);
4459 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4460 			}
4461 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4462 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4463 			return;
4464 		}
4465 	}
4466 	/**********************/
4467 	/* 1) check the range */
4468 	/**********************/
4469 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4470 		/* acking something behind */
4471 		return;
4472 	}
4473 	/* update the Rwnd of the peer */
4474 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4475 	    TAILQ_EMPTY(&asoc->send_queue) &&
4476 	    (asoc->stream_queue_cnt == 0)) {
4477 		/* nothing left on send/sent and strmq */
4478 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4479 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4480 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4481 		}
4482 		asoc->peers_rwnd = a_rwnd;
4483 		if (asoc->sent_queue_retran_cnt) {
4484 			asoc->sent_queue_retran_cnt = 0;
4485 		}
4486 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4487 			/* SWS sender side engages */
4488 			asoc->peers_rwnd = 0;
4489 		}
4490 		/* stop any timers */
4491 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4492 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4493 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4494 			net->partial_bytes_acked = 0;
4495 			net->flight_size = 0;
4496 		}
4497 		asoc->total_flight = 0;
4498 		asoc->total_flight_count = 0;
4499 		return;
4500 	}
4501 	/*
4502 	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4503 	 * things. The total byte count acked is tracked in netAckSz AND
4504 	 * netAck2 is used to track the total bytes acked that are
4505 	 * unambiguous and were never retransmitted. We track these on a per
4506 	 * destination address basis.
4507 	 */
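	/*
	 * E.g. a chunk SACKed on its first transmission (snd_count < 2)
	 * credits both net_ack and net_ack2, and only such chunks feed
	 * the RTO measurement below, per Karn's algorithm; a
	 * retransmitted chunk credits net_ack alone.
	 */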
4508 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4509 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4510 			/* Drag along the window_tsn for cwr's */
4511 			net->cwr_window_tsn = cum_ack;
4512 		}
4513 		net->prev_cwnd = net->cwnd;
4514 		net->net_ack = 0;
4515 		net->net_ack2 = 0;
4516 
4517 		/*
4518 		 * CMT: Reset CUC and Fast recovery algo variables before
4519 		 * SACK processing
4520 		 */
4521 		net->new_pseudo_cumack = 0;
4522 		net->will_exit_fast_recovery = 0;
4523 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4524 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4525 		}
4526 	}
4527 	/* process the new consecutive TSN first */
4528 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4529 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4530 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4531 				accum_moved = 1;
4532 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4533 					/*
4534 					 * If it is less than ACKED, it is
4535 					 * now no longer in flight. Higher
4536 					 * values may occur during marking
4537 					 */
4538 					if ((tp1->whoTo->dest_state &
4539 					    SCTP_ADDR_UNCONFIRMED) &&
4540 					    (tp1->snd_count < 2)) {
4541 						/*
4542 						 * If there was no retran
4543 						 * and the address is
4544 						 * un-confirmed and we sent
4545 						 * there and are now
4546 						 * sacked.. it's confirmed,
4547 						 * mark it so.
4548 						 */
4549 						tp1->whoTo->dest_state &=
4550 						    ~SCTP_ADDR_UNCONFIRMED;
4551 					}
4552 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4553 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4554 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4555 							    tp1->whoTo->flight_size,
4556 							    tp1->book_size,
4557 							    (uintptr_t) tp1->whoTo,
4558 							    tp1->rec.data.TSN_seq);
4559 						}
4560 						sctp_flight_size_decrease(tp1);
4561 						sctp_total_flight_decrease(stcb, tp1);
4562 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4563 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4564 							    tp1);
4565 						}
4566 					}
4567 					tp1->whoTo->net_ack += tp1->send_size;
4568 
4569 					/* CMT SFR and DAC algos */
4570 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4571 					tp1->whoTo->saw_newack = 1;
4572 
4573 					if (tp1->snd_count < 2) {
4574 						/*
4575 						 * True non-retransmitted
4576 						 * chunk
4577 						 */
4578 						tp1->whoTo->net_ack2 +=
4579 						    tp1->send_size;
4580 
4581 						/* update RTO too? */
4582 						if (tp1->do_rtt) {
4583 							if (rto_ok) {
4584 								tp1->whoTo->RTO =
4585 								    sctp_calculate_rto(stcb,
4586 								    asoc, tp1->whoTo,
4587 								    &tp1->sent_rcv_time,
4588 								    sctp_align_safe_nocopy,
4589 								    SCTP_RTT_FROM_DATA);
4590 								rto_ok = 0;
4591 							}
4592 							if (tp1->whoTo->rto_needed == 0) {
4593 								tp1->whoTo->rto_needed = 1;
4594 							}
4595 							tp1->do_rtt = 0;
4596 						}
4597 					}
4598 					/*
4599 					 * CMT: CUCv2 algorithm. From the
4600 					 * cumack'd TSNs, for each TSN being
4601 					 * acked for the first time, set the
4602 					 * following variables for the
4603 					 * corresp destination.
4604 					 * new_pseudo_cumack will trigger a
4605 					 * cwnd update.
4606 					 * find_(rtx_)pseudo_cumack will
4607 					 * trigger search for the next
4608 					 * expected (rtx-)pseudo-cumack.
4609 					 */
4610 					tp1->whoTo->new_pseudo_cumack = 1;
4611 					tp1->whoTo->find_pseudo_cumack = 1;
4612 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4613 
4614 
4615 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4616 						sctp_log_sack(asoc->last_acked_seq,
4617 						    cum_ack,
4618 						    tp1->rec.data.TSN_seq,
4619 						    0,
4620 						    0,
4621 						    SCTP_LOG_TSN_ACKED);
4622 					}
4623 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4624 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4625 					}
4626 				}
4627 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4628 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4629 #ifdef SCTP_AUDITING_ENABLED
4630 					sctp_audit_log(0xB3,
4631 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4632 #endif
4633 				}
4634 				if (tp1->rec.data.chunk_was_revoked) {
4635 					/* deflate the cwnd */
4636 					tp1->whoTo->cwnd -= tp1->book_size;
4637 					tp1->rec.data.chunk_was_revoked = 0;
4638 				}
4639 				tp1->sent = SCTP_DATAGRAM_ACKED;
4640 			}
4641 		} else {
4642 			break;
4643 		}
4644 	}
4645 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4646 	/* always set this up to cum-ack */
4647 	asoc->this_sack_highest_gap = last_tsn;
4648 
4649 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4650 
4651 		/*
4652 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4653 		 * to be greater than the cumack. Also reset saw_newack to 0
4654 		 * for all dests.
4655 		 */
4656 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4657 			net->saw_newack = 0;
4658 			net->this_sack_highest_newack = last_tsn;
4659 		}
4660 
4661 		/*
4662 		 * thisSackHighestGap will increase while handling NEW
4663 		 * segments; this_sack_highest_newack will increase while
4664 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4665 		 * used for CMT DAC algo. saw_newack will also change.
4666 		 */
4667 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4668 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4669 		    num_seg, num_nr_seg, &rto_ok)) {
4670 			wake_him++;
4671 		}
4672 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4673 			/*
4674 			 * validate the biggest_tsn_acked in the gap acks if
4675 			 * strict adherence is wanted.
4676 			 */
4677 			if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4678 				/*
4679 				 * peer is either confused or we are under
4680 				 * attack. We must abort.
4681 				 */
4682 				printf("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4683 				    biggest_tsn_acked,
4684 				    send_s);
4685 
4686 				goto hopeless_peer;
4687 			}
4688 		}
4689 	}
4690 	/*******************************************/
4691 	/* cancel ALL T3-send timer if accum moved */
4692 	/*******************************************/
4693 	if (asoc->sctp_cmt_on_off > 0) {
4694 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4695 			if (net->new_pseudo_cumack)
4696 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4697 				    stcb, net,
4698 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4699 
4700 		}
4701 	} else {
4702 		if (accum_moved) {
4703 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4704 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4705 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4706 			}
4707 		}
4708 	}
4709 	/********************************************/
4710 	/* drop the acked chunks from the sentqueue */
4711 	/********************************************/
4712 	asoc->last_acked_seq = cum_ack;
4713 
4714 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4715 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4716 			break;
4717 		}
4718 		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4719 			/* no more sent on list */
4720 			printf("Warning, tp1->sent == %d and it's now acked?\n",
4721 			    tp1->sent);
4722 		}
4723 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4724 		if (tp1->pr_sctp_on) {
4725 			if (asoc->pr_sctp_cnt != 0)
4726 				asoc->pr_sctp_cnt--;
4727 		}
4728 		asoc->sent_queue_cnt--;
4729 		if (tp1->data) {
4730 			/* sa_ignore NO_NULL_CHK */
4731 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4732 			sctp_m_freem(tp1->data);
4733 			tp1->data = NULL;
4734 			if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4735 				asoc->sent_queue_cnt_removeable--;
4736 			}
4737 		}
4738 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4739 			sctp_log_sack(asoc->last_acked_seq,
4740 			    cum_ack,
4741 			    tp1->rec.data.TSN_seq,
4742 			    0,
4743 			    0,
4744 			    SCTP_LOG_FREE_SENT);
4745 		}
4746 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4747 		wake_him++;
4748 	}
4749 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4750 #ifdef INVARIANTS
4751 		panic("Warning flight size is positive and should be 0");
4752 #else
4753 		SCTP_PRINTF("Warning flight size incorrect, should be 0 but is %d\n",
4754 		    asoc->total_flight);
4755 #endif
4756 		asoc->total_flight = 0;
4757 	}
4758 	/* sa_ignore NO_NULL_CHK */
4759 	if ((wake_him) && (stcb->sctp_socket)) {
4760 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4761 		struct socket *so;
4762 
4763 #endif
4764 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4765 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4766 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4767 		}
4768 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
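		/*
		 * On these platforms the socket lock must be acquired
		 * before the TCB lock, so take a reference, drop the TCB
		 * lock, grab the socket lock and then re-take the TCB
		 * lock. The reference keeps the association from being
		 * freed while we are unlocked.
		 */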
4769 		so = SCTP_INP_SO(stcb->sctp_ep);
4770 		atomic_add_int(&stcb->asoc.refcnt, 1);
4771 		SCTP_TCB_UNLOCK(stcb);
4772 		SCTP_SOCKET_LOCK(so, 1);
4773 		SCTP_TCB_LOCK(stcb);
4774 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4775 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4776 			/* assoc was freed while we were unlocked */
4777 			SCTP_SOCKET_UNLOCK(so, 1);
4778 			return;
4779 		}
4780 #endif
4781 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4782 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4783 		SCTP_SOCKET_UNLOCK(so, 1);
4784 #endif
4785 	} else {
4786 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4787 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4788 		}
4789 	}
4790 
4791 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4792 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4793 			/* Setup so we will exit RFC2582 fast recovery */
4794 			will_exit_fast_recovery = 1;
4795 		}
4796 	}
4797 	/*
4798 	 * Check for revoked fragments:
4799 	 *
4800 	 * If the previous SACK had no frags then we cannot have any revoked.
4801 	 * If the previous SACK had frags then: if we now have frags (num_seg
4802 	 * > 0) call sctp_check_for_revoked() to tell if the peer revoked
4803 	 * some of them; else the peer revoked all ACKED fragments, since we
4804 	 * had some before and now we have NONE.
4805 	 */
4806 
4807 	if (num_seg) {
4808 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4809 		asoc->saw_sack_with_frags = 1;
4810 	} else if (asoc->saw_sack_with_frags) {
4811 		int cnt_revoked = 0;
4812 
4813 		/* Peer revoked all dg's marked or acked */
4814 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4815 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4816 				tp1->sent = SCTP_DATAGRAM_SENT;
4817 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4818 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4819 					    tp1->whoTo->flight_size,
4820 					    tp1->book_size,
4821 					    (uintptr_t) tp1->whoTo,
4822 					    tp1->rec.data.TSN_seq);
4823 				}
4824 				sctp_flight_size_increase(tp1);
4825 				sctp_total_flight_increase(stcb, tp1);
4826 				tp1->rec.data.chunk_was_revoked = 1;
4827 				/*
4828 				 * To ensure that this increase in
4829 				 * flightsize, which is artificial, does not
4830 				 * throttle the sender, we also increase the
4831 				 * cwnd artificially.
4832 				 */
4833 				tp1->whoTo->cwnd += tp1->book_size;
4834 				cnt_revoked++;
4835 			}
4836 		}
4837 		if (cnt_revoked) {
4838 			reneged_all = 1;
4839 		}
4840 		asoc->saw_sack_with_frags = 0;
4841 	}
4842 	if (num_nr_seg > 0)
4843 		asoc->saw_sack_with_nr_frags = 1;
4844 	else
4845 		asoc->saw_sack_with_nr_frags = 0;
4846 
4847 	/* JRS - Use the congestion control given in the CC module */
4848 	if (ecne_seen == 0) {
4849 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4850 			if (net->net_ack2 > 0) {
4851 				/*
4852 				 * Karn's rule applies to clearing error
4853 				 * count, this is optional.
4854 				 */
4855 				net->error_count = 0;
4856 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4857 					/* addr came good */
4858 					net->dest_state |= SCTP_ADDR_REACHABLE;
4859 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4860 					    SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED);
4861 				}
4862 				if (net == stcb->asoc.primary_destination) {
4863 					if (stcb->asoc.alternate) {
4864 						/*
4865 						 * release the alternate,
4866 						 * primary is good
4867 						 */
4868 						sctp_free_remote_addr(stcb->asoc.alternate);
4869 						stcb->asoc.alternate = NULL;
4870 					}
4871 				}
4872 				if (net->dest_state & SCTP_ADDR_PF) {
4873 					net->dest_state &= ~SCTP_ADDR_PF;
4874 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
4875 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4876 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4877 					/* Done with this net */
4878 					net->net_ack = 0;
4879 				}
4880 				/* restore any doubled timers */
4881 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4882 				if (net->RTO < stcb->asoc.minrto) {
4883 					net->RTO = stcb->asoc.minrto;
4884 				}
4885 				if (net->RTO > stcb->asoc.maxrto) {
4886 					net->RTO = stcb->asoc.maxrto;
4887 				}
4888 			}
4889 		}
4890 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4891 	}
4892 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4893 		/* nothing left in-flight */
4894 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4895 			/* stop all timers */
4896 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4897 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4898 			net->flight_size = 0;
4899 			net->partial_bytes_acked = 0;
4900 		}
4901 		asoc->total_flight = 0;
4902 		asoc->total_flight_count = 0;
4903 	}
4904 	/**********************************/
4905 	/* Now what about shutdown issues */
4906 	/**********************************/
4907 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4908 		/* nothing left on sendqueue.. consider done */
4909 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4910 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4911 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4912 		}
4913 		asoc->peers_rwnd = a_rwnd;
4914 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4915 			/* SWS sender side engages */
4916 			asoc->peers_rwnd = 0;
4917 		}
4918 		/* clean up */
4919 		if ((asoc->stream_queue_cnt == 1) &&
4920 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4921 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4922 		    (asoc->locked_on_sending)
4923 		    ) {
4924 			struct sctp_stream_queue_pending *sp;
4925 
4926 			/*
4927 			 * We may be in a state where everything got across
4928 			 * but we cannot write more due to a shutdown; we
4929 			 * abort since the user did not indicate EOR here.
4930 			 */
4931 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4932 			    sctp_streamhead);
4933 			if ((sp) && (sp->length == 0)) {
4934 				asoc->locked_on_sending = NULL;
4935 				if (sp->msg_is_complete) {
4936 					asoc->stream_queue_cnt--;
4937 				} else {
4938 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4939 					asoc->stream_queue_cnt--;
4940 				}
4941 			}
4942 		}
4943 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4944 		    (asoc->stream_queue_cnt == 0)) {
4945 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4946 				/* Need to abort here */
4947 				struct mbuf *oper;
4948 
4949 		abort_out_now:
4950 				*abort_now = 1;
4951 				/* XXX */
4952 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4953 				    0, M_DONTWAIT, 1, MT_DATA);
4954 				if (oper) {
4955 					struct sctp_paramhdr *ph;
4956 					uint32_t *ippp;
4957 
4958 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4959 					    sizeof(uint32_t);
4960 					ph = mtod(oper, struct sctp_paramhdr *);
4961 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4962 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4963 					ippp = (uint32_t *) (ph + 1);
4964 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4965 				}
4966 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4967 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4968 				return;
4969 			} else {
4970 				struct sctp_nets *netp;
4971 
4972 				if (asoc->alternate) {
4973 					netp = asoc->alternate;
4974 				} else {
4975 					netp = asoc->primary_destination;
4976 				}
4977 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4978 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4979 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4980 				}
4981 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4982 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4983 				sctp_stop_timers_for_shutdown(stcb);
4984 				sctp_send_shutdown(stcb, netp);
4985 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4986 				    stcb->sctp_ep, stcb, netp);
4987 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4988 				    stcb->sctp_ep, stcb, netp);
4989 			}
4990 			return;
4991 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4992 		    (asoc->stream_queue_cnt == 0)) {
4993 			struct sctp_nets *netp;
4994 
4995 			if (asoc->alternate) {
4996 				netp = asoc->alternate;
4997 			} else {
4998 				netp = asoc->primary_destination;
4999 			}
5000 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5001 				goto abort_out_now;
5002 			}
5003 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5004 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5005 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5006 			sctp_send_shutdown_ack(stcb, netp);
5007 			sctp_stop_timers_for_shutdown(stcb);
5008 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5009 			    stcb->sctp_ep, stcb, netp);
5010 			return;
5011 		}
5012 	}
5013 	/*
5014 	 * Now here we are going to recycle net_ack for a different use...
5015 	 * HEADS UP.
5016 	 */
5017 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5018 		net->net_ack = 0;
5019 	}
5020 
5021 	/*
5022 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5023 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5024 	 * automatically ensure that.
5025 	 */
5026 	if ((asoc->sctp_cmt_on_off > 0) &&
5027 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5028 	    (cmt_dac_flag == 0)) {
5029 		this_sack_lowest_newack = cum_ack;
5030 	}
5031 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5032 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5033 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5034 	}
5035 	/* JRS - Use the congestion control given in the CC module */
5036 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5037 
5038 	/* Now are we exiting loss recovery ? */
5039 	if (will_exit_fast_recovery) {
5040 		/* Ok, we must exit fast recovery */
5041 		asoc->fast_retran_loss_recovery = 0;
5042 	}
5043 	if ((asoc->sat_t3_loss_recovery) &&
5044 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5045 		/* end satellite t3 loss recovery */
5046 		asoc->sat_t3_loss_recovery = 0;
5047 	}
5048 	/*
5049 	 * CMT Fast recovery
5050 	 */
5051 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5052 		if (net->will_exit_fast_recovery) {
5053 			/* Ok, we must exit fast recovery */
5054 			net->fast_retran_loss_recovery = 0;
5055 		}
5056 	}
5057 
5058 	/* Adjust and set the new rwnd value */
5059 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5060 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5061 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5062 	}
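	/*
	 * The peer's rwnd is its advertised window minus what is still in
	 * flight, charging sctp_peer_chunk_oh bytes of bookkeeping overhead
	 * per chunk in flight; sctp_sbspace_sub() floors the result at zero.
	 */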
5063 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5064 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5065 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5066 		/* SWS sender side engages */
5067 		asoc->peers_rwnd = 0;
5068 	}
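	/*
	 * If the window opened relative to the previous SACK, chunks sent
	 * as window probes can be recovered in the loop below.
	 */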
5069 	if (asoc->peers_rwnd > old_rwnd) {
5070 		win_probe_recovery = 1;
5071 	}
5072 	/*
5073 	 * Now we must setup so we have a timer up for anyone with
5074 	 * outstanding data.
5075 	 */
5076 	done_once = 0;
5077 again:
5078 	j = 0;
5079 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5080 		if (win_probe_recovery && (net->window_probe)) {
5081 			win_probe_recovered = 1;
5082 			/*-
5083 			 * Find first chunk that was used with
5084 			 * window probe and clear the event. Put
5085 			 * it back into the send queue as if it
5086 			 * has not been sent.
5087 			 */
5088 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5089 				if (tp1->window_probe) {
5090 					sctp_window_probe_recovery(stcb, asoc, tp1);
5091 					break;
5092 				}
5093 			}
5094 		}
5095 		if (net->flight_size) {
5096 			j++;
5097 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5098 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5099 				    stcb->sctp_ep, stcb, net);
5100 			}
5101 			if (net->window_probe) {
5102 				net->window_probe = 0;
5103 			}
5104 		} else {
5105 			if (net->window_probe) {
5106 				/*
5107 				 * For window probes we must ensure a timer
5108 				 * is still running there.
5109 				 */
5110 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5111 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5112 					    stcb->sctp_ep, stcb, net);
5113 
5114 				}
5115 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5116 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5117 				    stcb, net,
5118 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5119 			}
5120 		}
5121 	}
5122 	if ((j == 0) &&
5123 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5124 	    (asoc->sent_queue_retran_cnt == 0) &&
5125 	    (win_probe_recovered == 0) &&
5126 	    (done_once == 0)) {
5127 		/*
5128 		 * huh, this should not happen unless all packets are
5129 		 * PR-SCTP and marked to skip of course.
5130 		 */
5131 		if (sctp_fs_audit(asoc)) {
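			/*
			 * The audit found a mismatch: zero all flight
			 * accounting and rebuild it from the chunks still
			 * on the sent queue.
			 */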
5132 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5133 				net->flight_size = 0;
5134 			}
5135 			asoc->total_flight = 0;
5136 			asoc->total_flight_count = 0;
5137 			asoc->sent_queue_retran_cnt = 0;
5138 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5139 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5140 					sctp_flight_size_increase(tp1);
5141 					sctp_total_flight_increase(stcb, tp1);
5142 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5143 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5144 				}
5145 			}
5146 		}
5147 		done_once = 1;
5148 		goto again;
5149 	}
5150 	/*********************************************/
5151 	/* Here we perform PR-SCTP procedures        */
5152 	/* (section 4.2)                             */
5153 	/*********************************************/
5154 	/* C1. update advancedPeerAckPoint */
5155 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5156 		asoc->advanced_peer_ack_point = cum_ack;
5157 	}
5158 	/* C2. try to further move advancedPeerAckPoint ahead */
5159 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5160 		struct sctp_tmit_chunk *lchk;
5161 		uint32_t old_adv_peer_ack_point;
5162 
5163 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5164 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5165 		/* C3. See if we need to send a Fwd-TSN */
5166 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5167 			/*
5168 			 * ISSUE with ECN, see FWD-TSN processing.
5169 			 */
5170 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5171 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5172 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5173 				    old_adv_peer_ack_point);
5174 			}
5175 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5176 				send_forward_tsn(stcb, asoc);
5177 			} else if (lchk) {
5178 				/* try to FR fwd-tsn's that get lost too */
5179 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5180 					send_forward_tsn(stcb, asoc);
5181 				}
5182 			}
5183 		}
5184 		if (lchk) {
5185 			/* Assure a timer is up */
5186 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5187 			    stcb->sctp_ep, stcb, lchk->whoTo);
5188 		}
5189 	}
5190 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5191 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5192 		    a_rwnd,
5193 		    stcb->asoc.peers_rwnd,
5194 		    stcb->asoc.total_flight,
5195 		    stcb->asoc.total_output_queue_size);
5196 	}
5197 }
5198 
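/*
 * A SHUTDOWN chunk carries a cumulative TSN ack, so run it through the
 * express SACK handler with no gap-ack blocks and an a_rwnd chosen so
 * that the peer's advertised window is left unchanged.
 */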
5199 void
5200 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5201 {
5202 	/* Copy cum-ack */
5203 	uint32_t cum_ack, a_rwnd;
5204 
5205 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5206 	/* Arrange so a_rwnd does NOT change */
5207 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5208 
5209 	/* Now call the express sack handling */
5210 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5211 }
5212 
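/*
 * After a FWD-TSN has advanced a stream's delivery point, deliver anything
 * on the stream's re-ordering queue that is now deliverable: first all
 * messages at or below the new point, then any consecutive run above it.
 */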
5213 static void
5214 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5215     struct sctp_stream_in *strmin)
5216 {
5217 	struct sctp_queued_to_read *ctl, *nctl;
5218 	struct sctp_association *asoc;
5219 	uint16_t tt;
5220 
5221 	asoc = &stcb->asoc;
5222 	tt = strmin->last_sequence_delivered;
5223 	/*
5224 	 * First deliver anything prior to and including the stream
5225 	 * sequence number that came in.
5226 	 */
5227 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5228 		if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
5229 			/* this is deliverable now */
5230 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5231 			/* subtract pending on streams */
5232 			asoc->size_on_all_streams -= ctl->length;
5233 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5234 			/* deliver it to at least the delivery-q */
5235 			if (stcb->sctp_socket) {
5236 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5237 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5238 				    ctl,
5239 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5240 			}
5241 		} else {
5242 			/* no more delivery now. */
5243 			break;
5244 		}
5245 	}
5246 	/*
5247 	 * Now we must deliver things in the queue the normal way,
5248 	 * if any are now ready.
5249 	 */
5250 	tt = strmin->last_sequence_delivered + 1;
5251 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5252 		if (tt == ctl->sinfo_ssn) {
5253 			/* this is deliverable now */
5254 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5255 			/* subtract pending on streams */
5256 			asoc->size_on_all_streams -= ctl->length;
5257 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5258 			/* deliver it to at least the delivery-q */
5259 			strmin->last_sequence_delivered = ctl->sinfo_ssn;
5260 			if (stcb->sctp_socket) {
5261 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5262 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5263 				    ctl,
5264 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5265 
5266 			}
5267 			tt = strmin->last_sequence_delivered + 1;
5268 		} else {
5269 			break;
5270 		}
5271 	}
5272 }
5273 
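/*
 * Toss reassembly-queue fragments belonging to the given stream and
 * sequence number that a FWD-TSN tells us to skip. Unordered fragments
 * are left alone, since the stream sequence number has no meaning there.
 */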
5274 static void
5275 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5276     struct sctp_association *asoc,
5277     uint16_t stream, uint16_t seq)
5278 {
5279 	struct sctp_tmit_chunk *chk, *nchk;
5280 
5281 	/* For each one on here see if we need to toss it */
5282 	/*
5283 	 * For now, large messages held on the reasmqueue that are complete
5284 	 * will be tossed too. We could in theory do more work to spin
5285 	 * through and stop after dumping one msg, aka seeing the start of a
5286 	 * new msg at the head, and call the delivery function to see if it
5287 	 * can be delivered. But for now we just dump everything on the
5288 	 * queue.
5289 	 */
5290 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5291 		/*
5292 		 * Do not toss it if on a different stream or marked for
5293 		 * unordered delivery in which case the stream sequence
5294 		 * number has no meaning.
5295 		 */
5296 		if ((chk->rec.data.stream_number != stream) ||
5297 		    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5298 			continue;
5299 		}
5300 		if (chk->rec.data.stream_seq == seq) {
5301 			/* It needs to be tossed */
5302 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5303 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5304 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5305 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5306 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5307 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5308 			}
5309 			asoc->size_on_reasm_queue -= chk->send_size;
5310 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5311 
5312 			/* Clear up any stream problem */
5313 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5314 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5315 				/*
5316 				 * We must dump forward this stream's
5317 				 * sequence number if the chunk being
5318 				 * skipped is not unordered. There is
5319 				 * a chance that if the peer does not
5320 				 * include the last fragment in its FWD-TSN
5321 				 * we WILL have a problem here since you
5322 				 * would have a partial chunk in queue that
5323 				 * may not be deliverable. Also if a partial
5324 				 * delivery API has started, the user may
5325 				 * get a partial chunk, and the next read
5326 				 * returns a new chunk. Really ugly, but I
5327 				 * see no way around it! Maybe a notify??
5328 				 */
5329 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5330 			}
5331 			if (chk->data) {
5332 				sctp_m_freem(chk->data);
5333 				chk->data = NULL;
5334 			}
5335 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5336 		} else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
5337 			/*
5338 			 * If the stream_seq is > than the purging one, we
5339 			 * are done
5340 			 */
5341 			break;
5342 		}
5343 	}
5344 }
5345 
5346 
5347 void
5348 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5349     struct sctp_forward_tsn_chunk *fwd,
5350     int *abort_flag, struct mbuf *m, int offset)
5351 {
5352 	/* The pr-sctp fwd tsn */
5353 	/*
5354 	 * Here we will perform all the data receiver side steps for
5355 	 * processing FwdTSN, as required by the pr-sctp draft:
5356 	 *
5357 	 * Assume we get FwdTSN(x):
5358 	 * 1) update local cumTSN to x
5359 	 * 2) try to further advance cumTSN to x + others we have
5360 	 * 3) examine and update re-ordering queue on pr-in-streams
5361 	 * 4) clean up re-assembly queue
5362 	 * 5) send a SACK to report where we are.
5363 	 */
5364 	struct sctp_association *asoc;
5365 	uint32_t new_cum_tsn, gap;
5366 	unsigned int i, fwd_sz, m_size;
5367 	uint32_t str_seq;
5368 	struct sctp_stream_in *strm;
5369 	struct sctp_tmit_chunk *chk, *nchk;
5370 	struct sctp_queued_to_read *ctl, *sv;
5371 
5372 	asoc = &stcb->asoc;
5373 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5374 		SCTPDBG(SCTP_DEBUG_INDATA1,
5375 		    "Bad size, too small fwd-tsn\n");
5376 		return;
5377 	}
5378 	m_size = (stcb->asoc.mapping_array_size << 3);
5379 	/*************************************************************/
5380 	/* 1. Here we update local cumTSN and shift the bitmap array */
5381 	/*************************************************************/
5382 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5383 
5384 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5385 		/* Already got there ... */
5386 		return;
5387 	}
5388 	/*
5389 	 * now we know the new TSN is more advanced, let's find the actual
5390 	 * gap
5391 	 */
5392 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5393 	asoc->cumulative_tsn = new_cum_tsn;
5394 	if (gap >= m_size) {
5395 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5396 			struct mbuf *oper;
5397 
5398 			/*
5399 			 * out of range (of single byte chunks in the rwnd I
5400 			 * give out). This must be an attacker.
5401 			 */
5402 			*abort_flag = 1;
5403 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5404 			    0, M_DONTWAIT, 1, MT_DATA);
5405 			if (oper) {
5406 				struct sctp_paramhdr *ph;
5407 				uint32_t *ippp;
5408 
5409 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5410 				    (sizeof(uint32_t) * 3);
5411 				ph = mtod(oper, struct sctp_paramhdr *);
5412 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5413 				ph->param_length = htons(SCTP_BUF_LEN(oper));
5414 				ippp = (uint32_t *) (ph + 1);
5415 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5416 				ippp++;
5417 				*ippp = asoc->highest_tsn_inside_map;
5418 				ippp++;
5419 				*ippp = new_cum_tsn;
5420 			}
5421 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5422 			sctp_abort_an_association(stcb->sctp_ep, stcb,
5423 			    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
5424 			return;
5425 		}
5426 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5427 
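		/*
		 * The new cumulative TSN lies beyond the current mapping
		 * window: restart both mapping arrays just past it.
		 */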
5428 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5429 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5430 		asoc->highest_tsn_inside_map = new_cum_tsn;
5431 
5432 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5433 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5434 
5435 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5436 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5437 		}
5438 	} else {
5439 		SCTP_TCB_LOCK_ASSERT(stcb);
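		/*
		 * Mark each TSN up to the forwarded point as received and
		 * non-renegable in the nr-mapping array, unless it is
		 * already marked in either map.
		 */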
5440 		for (i = 0; i <= gap; i++) {
5441 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5442 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5443 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5444 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5445 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5446 				}
5447 			}
5448 		}
5449 	}
5450 	/*************************************************************/
5451 	/* 2. Clear up re-assembly queue                             */
5452 	/*************************************************************/
5453 	/*
5454 	 * First service it if pd-api is up, just in case we can progress it
5455 	 * forward
5456 	 */
5457 	if (asoc->fragmented_delivery_inprogress) {
5458 		sctp_service_reassembly(stcb, asoc);
5459 	}
5460 	/* For each one on here see if we need to toss it */
5461 	/*
5462 	 * For now, large messages held on the reasmqueue that are complete
5463 	 * will be tossed too. We could in theory do more work to spin
5464 	 * through and stop after dumping one msg, aka seeing the start of a
5465 	 * new msg at the head, and call the delivery function to see if it
5466 	 * can be delivered. But for now we just dump everything on the
5467 	 * queue.
5468 	 */
5469 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5470 		if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5471 			/* It needs to be tossed */
5472 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5473 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5474 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5475 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5476 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5477 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5478 			}
5479 			asoc->size_on_reasm_queue -= chk->send_size;
5480 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5481 
5482 			/* Clear up any stream problem */
5483 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5484 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5485 				/*
5486 				 * We must dump forward this stream's
5487 				 * sequence number if the chunk being
5488 				 * skipped is not unordered. There is
5489 				 * a chance that if the peer does not
5490 				 * include the last fragment in its FWD-TSN
5491 				 * we WILL have a problem here since you
5492 				 * would have a partial chunk in queue that
5493 				 * may not be deliverable. Also if a partial
5494 				 * delivery API has started, the user may
5495 				 * get a partial chunk, and the next read
5496 				 * returns a new chunk. Really ugly, but I
5497 				 * see no way around it! Maybe a notify??
5498 				 */
5499 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5500 			}
5501 			if (chk->data) {
5502 				sctp_m_freem(chk->data);
5503 				chk->data = NULL;
5504 			}
5505 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5506 		} else {
5507 			/*
5508 			 * Ok we have gone beyond the end of the fwd-tsn's
5509 			 * mark.
5510 			 */
5511 			break;
5512 		}
5513 	}
5514 	/*******************************************************/
5515 	/* 3. Update the PR-stream re-ordering queues and fix  */
5516 	/* delivery issues as needed.                       */
5517 	/* delivery issues as needed.                          */
5518 	fwd_sz -= sizeof(*fwd);
5519 	if (m && fwd_sz) {
5520 		/* New method. */
5521 		unsigned int num_str;
5522 		struct sctp_strseq *stseq, strseqbuf;
5523 
5524 		offset += sizeof(*fwd);
5525 
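		/*
		 * Walk the (stream, sequence) pairs carried in the chunk:
		 * for each one abort any partial delivery in progress,
		 * flush matching reassembly fragments, advance the stream's
		 * delivery point and kick its re-ordering queue.
		 */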
5526 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5527 		num_str = fwd_sz / sizeof(struct sctp_strseq);
5528 		for (i = 0; i < num_str; i++) {
5529 			uint16_t st;
5530 
5531 			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5532 			    sizeof(struct sctp_strseq),
5533 			    (uint8_t *) & strseqbuf);
5534 			offset += sizeof(struct sctp_strseq);
5535 			if (stseq == NULL) {
5536 				break;
5537 			}
5538 			/* Convert */
5539 			st = ntohs(stseq->stream);
5540 			stseq->stream = st;
5541 			st = ntohs(stseq->sequence);
5542 			stseq->sequence = st;
5543 
5544 			/* now process */
5545 
5546 			/*
5547 			 * Ok we now look for the stream/seq on the read
5548 			 * queue where it is not all delivered. If we find it
5549 			 * we transmute the read entry into a PDI_ABORTED.
5550 			 */
5551 			if (stseq->stream >= asoc->streamincnt) {
5552 				/* screwed up streams, stop!  */
5553 				break;
5554 			}
5555 			if ((asoc->str_of_pdapi == stseq->stream) &&
5556 			    (asoc->ssn_of_pdapi == stseq->sequence)) {
5557 				/*
5558 				 * If this is the one we were partially
5559 				 * delivering now then we no longer are.
5560 				 * Note this will change with the reassembly
5561 				 * re-write.
5562 				 */
5563 				asoc->fragmented_delivery_inprogress = 0;
5564 			}
5565 			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5566 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5567 				if ((ctl->sinfo_stream == stseq->stream) &&
5568 				    (ctl->sinfo_ssn == stseq->sequence)) {
5569 					str_seq = (stseq->stream << 16) | stseq->sequence;
5570 					ctl->end_added = 1;
5571 					ctl->pdapi_aborted = 1;
5572 					sv = stcb->asoc.control_pdapi;
5573 					stcb->asoc.control_pdapi = ctl;
5574 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5575 					    stcb,
5576 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5577 					    (void *)&str_seq,
5578 					    SCTP_SO_NOT_LOCKED);
5579 					stcb->asoc.control_pdapi = sv;
5580 					break;
5581 				} else if ((ctl->sinfo_stream == stseq->stream) &&
5582 				    SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5583 					/* We are past our victim SSN */
5584 					break;
5585 				}
5586 			}
5587 			strm = &asoc->strmin[stseq->stream];
5588 			if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5589 				/* Update the sequence number */
5590 				strm->last_sequence_delivered = stseq->sequence;
5591 			}
5592 			/* now kick the stream the new way */
5593 			/* sa_ignore NO_NULL_CHK */
5594 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5595 		}
5596 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5597 	}
5598 	/*
5599 	 * Now slide the mapping arrays forward.
5600 	 */
5601 	sctp_slide_mapping_arrays(stcb);
5602 
5603 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5604 		/* now lets kick out and check for more fragmented delivery */
5605 		/* sa_ignore NO_NULL_CHK */
5606 		sctp_deliver_reasm_check(stcb, &stcb->asoc);
5607 	}
5608 }
5609