xref: /freebsd/sys/netinet/sctp_indata.c (revision d940bfec8c329dd82d8d54efebd81c8aa420503b)
/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>


/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it, for bundling).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-2-m socket. Since
	 * sb_cc is the count that everyone has put up, when we rewrite
	 * sctp_soreceive we will fix this so that ONLY this association's
	 * data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * Take out what has NOT been put on the socket queue and that we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));

	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* take out the overhead of all these rwnds */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to control overhead, reduce it
	 * to 1, even if it would otherwise be 0, to avoid silly window
	 * syndrome (SWS).
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}


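/*
 * Illustrative only (not part of the original source): a rough sketch of
 * the arithmetic above, assuming a 64000-byte receive buffer limit and
 * MSIZE of 256:
 *
 *   calc  = sbspace              e.g. 61000
 *   calc -= 2000 + 4 * 256       reasm-queue bytes + per-chunk mbuf cost
 *   calc -= 1000 + 2 * 256       stream-queue bytes + per-chunk mbuf cost
 *   calc -= my_rwnd_control_len  control/notification overhead
 *
 * The final clamp to 1 keeps the window open a crack rather than
 * advertising 0, so the peer can still probe (SWS avoidance).
 */
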
/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


/*
 * Build out our readq entry based on the incoming chunk.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}
	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->sreinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}


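/*
 * Illustrative only: with all three features enabled, the mbuf built
 * above carries back-to-back cmsgs, each padded out to CMSG_SPACE():
 *
 *   [cmsghdr|sctp_rcvinfo][cmsghdr|sctp_nxtinfo][cmsghdr|sctp_sndrcvinfo]
 *
 * SCTP_BUF_LEN(ret) grows by CMSG_SPACE(...) per entry, so each
 * subsequent cmsghdr starts on a properly aligned boundary.
 */
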
static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one map to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
		panic("Things are really messed up now!!");
#endif
	}
	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}

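/*
 * Illustrative only: if mapping_array_base_tsn is 100 and tsn is 103,
 * gap is 3; bit 3 is cleared in mapping_array (renegable) and set in
 * nr_mapping_array (non-renegable), so a later drain cannot revoke the
 * TSN once it has been handed to the reader. If tsn was the highest bit
 * set in mapping_array, we walk backwards to find the new
 * highest_tsn_inside_map.
 */
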
/*
 * We are delivering currently from the reassembly queue. We must continue
 * to deliver until we either: 1) run out of space, 2) run out of
 * sequential TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *nchk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;
	struct sctp_queued_to_read *control, *ctl, *nctl;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket above is long gone or going.. */
abandon:
		asoc->fragmented_delivery_inprogress = 0;
		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			/* sa_ignore FREED_MEMORY */
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream
			 * and not unordered
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {

			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end,
			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong, either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is an EOM already
				 * on the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was an EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");

#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* pull it, we did it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now let's see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
				/* Deliver more if we can. */
				if (nxt_todel == ctl->sinfo_ssn) {
					TAILQ_REMOVE(&strm->inqueue, ctl, next);
					asoc->size_on_all_streams -= ctl->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					strm->last_sequence_delivered++;
					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					    ctl,
					    &stcb->sctp_socket->so_rcv, 1,
					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
				} else {
					break;
				}
				nxt_todel = strm->last_sequence_delivered + 1;
			}
			break;
		}
	}
}
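
/*
 * Illustrative only: given fragments TSN 10 (FIRST), 11 (MIDDLE) and
 * 12 (LAST) of one message on the reasm queue, the loop above appends
 * 10, 11 and 12 to the read queue in order, clears
 * fragmented_delivery_inprogress at 12, and then tries to deliver any
 * complete queued messages for that stream before breaking out.
 */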

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains, what to do when the socket buffer is FULL??
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 *
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *oper;

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream,
	    (uint32_t) strm->last_sequence_delivered,
	    (uint32_t) nxt_todel);
	if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_NOWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (sizeof(uint32_t) * 3);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
			ippp++;
			*ippp = control->sinfo_tsn;
			ippp++;
			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;

	}
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it won't be queued if it can be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;

		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_NOT_LOCKED);
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, he sent me a duplicate str
					 * seq number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSNs have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh for now just blow
					 * away the chunk!
					 */

					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom) {
						sctp_free_remote_addr(control->whoFrom);
						control->whoFrom = NULL;
					}
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}

/*
 * Returns two things: you get the total size of the deliverable parts of
 * the first fragmented message on the reassembly queue, and you get a 1
 * back if all of the message is ready or a 0 back if the message is still
 * incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
	}
	return (0);
}

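/*
 * Illustrative only: for fragments TSN 10 (FIRST), 11 (MIDDLE),
 * 12 (LAST) at the queue head, this returns 1 with *t_size equal to the
 * three send_sizes summed; a hole (10 and 12 present without 11) returns
 * 0 at the first TSN mismatch, with *t_size covering only the
 * contiguous run.
 */
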
static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize, pd_point;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep, the first one is here and it's OK to
			 * deliver, but should we?
			 */
			if (stcb->sctp_socket) {
				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
				    stcb->sctp_ep->partial_delivery_point);
			} else {
				pd_point = stcb->sctp_ep->partial_delivery_point;
			}
			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {

				/*
				 * Yes, we set up to start reception by
				 * backing down the TSN, just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery.. but it won't know to go
		 * back and call itself again... we do that here with the
		 * goto doit_again
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our fragmented delivery, could be more
			 * waiting?
			 */
			goto doit_again;
		}
	}
}

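/*
 * Illustrative only: with an SO_RCVBUF limit of 64000 and a
 * partial_delivery_point of 4096, pd_point is 4096, so a partially
 * reassembled message starts being pushed to the application (PD-API)
 * once at least 4096 deliverable bytes are queued, even though the LAST
 * fragment has not yet arrived.
 */
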
/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	uint32_t cum_ackp1, prev_tsn, post_tsn;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery in progress,
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_NOWAIT, 1, MT_DATA);

				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (sizeof(uint32_t) * 3);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment NOT a FIRST
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_NOWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (3 * sizeof(uint32_t));
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
					    chk->rec.data.stream_number,
					    asoc->str_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_NOWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (sizeof(uint32_t) * 3);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
					    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
					    chk->rec.data.stream_seq,
					    asoc->ssn_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_NOWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, he sent me a duplicate str seq number */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSNs have wrapped. Maybe I should
			 * compare to TSN somehow... sigh for now just blow
			 * away the chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			return;
		} else {
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_NOWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    prev->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_NOWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    prev->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_NOWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_NOWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_NOWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_MIDDLE_FRAG ||
				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_NOWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    next->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_NOWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    next->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_NOWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is
 * not stuffing us full of bad packet fragments. A broken peer could also
 * do this, but this is doubtful. It is too bad I must worry about evil
 * crackers sigh :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok this guy belongs next to a guy
					 * that is NOT last, it should be a
					 * middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since it's a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than the new chunk, but does
			 * the new chunk need to be right before it?
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, it better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}

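/*
 * Illustrative only: with TSN 10 (MIDDLE) on the reasm queue, an
 * arriving non-fragmented chunk with TSN 11 is flagged (returns 1),
 * since the chunk adjacent to a non-LAST fragment must itself be a
 * MIDDLE or LAST of the same message.
 */
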
1420 static int
1421 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1422     struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1423     struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1424     int *break_flag, int last_chunk)
1425 {
1426 	/* Process a data chunk */
1427 	/* struct sctp_tmit_chunk *chk; */
1428 	struct sctp_tmit_chunk *chk;
1429 	uint32_t tsn, gap;
1430 	struct mbuf *dmbuf;
1431 	int the_len;
1432 	int need_reasm_check = 0;
1433 	uint16_t strmno, strmseq;
1434 	struct mbuf *oper;
1435 	struct sctp_queued_to_read *control;
1436 	int ordered;
1437 	uint32_t protocol_id;
1438 	uint8_t chunk_flags;
1439 	struct sctp_stream_reset_list *liste;
1440 
1441 	chk = NULL;
1442 	tsn = ntohl(ch->dp.tsn);
1443 	chunk_flags = ch->ch.chunk_flags;
1444 	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1445 		asoc->send_sack = 1;
1446 	}
1447 	protocol_id = ch->dp.protocol_id;
1448 	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1449 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1450 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1451 	}
1452 	if (stcb == NULL) {
1453 		return (0);
1454 	}
1455 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1456 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1457 		/* It is a duplicate */
1458 		SCTP_STAT_INCR(sctps_recvdupdata);
1459 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1460 			/* Record a dup for the next outbound sack */
1461 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1462 			asoc->numduptsns++;
1463 		}
1464 		asoc->send_sack = 1;
1465 		return (0);
1466 	}
1467 	/* Calculate the number of TSN's between the base and this TSN */
1468 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1469 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1470 		/* Can't hold the bit in the mapping at max array, toss it */
1471 		return (0);
1472 	}
1473 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1474 		SCTP_TCB_LOCK_ASSERT(stcb);
1475 		if (sctp_expand_mapping_array(asoc, gap)) {
1476 			/* Can't expand, drop it */
1477 			return (0);
1478 		}
1479 	}
1480 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1481 		*high_tsn = tsn;
1482 	}
1483 	/* See if we have received this one already */
1484 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1485 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1486 		SCTP_STAT_INCR(sctps_recvdupdata);
1487 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1488 			/* Record a dup for the next outbound sack */
1489 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1490 			asoc->numduptsns++;
1491 		}
1492 		asoc->send_sack = 1;
1493 		return (0);
1494 	}
1495 	/*
1496 	 * Check to see about the GONE flag, duplicates would cause a sack
1497 	 * to be sent up above
1498 	 */
1499 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1500 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1501 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1502 	    ) {
1503 		/*
1504 		 * wait a minute, this guy is gone, there is no longer a
1505 		 * receiver. Send peer an ABORT!
1506 		 */
1507 		struct mbuf *op_err;
1508 
1509 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1510 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1511 		*abort_flag = 1;
1512 		return (0);
1513 	}
1514 	/*
1515 	 * Now before going further we see if there is room. If NOT then we
1516 	 * MAY let one through only IF this TSN is the one we are waiting
1517 	 * for on a partial delivery API.
1518 	 */
1519 
1520 	/* now do the tests */
1521 	if (((asoc->cnt_on_all_streams +
1522 	    asoc->cnt_on_reasm_queue +
1523 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1524 	    (((int)asoc->my_rwnd) <= 0)) {
1525 		/*
1526 		 * When we have NO room in the rwnd we check to make sure
1527 		 * the reader is doing its job...
1528 		 */
1529 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1530 			/* some to read, wake-up */
1531 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1532 			struct socket *so;
1533 
1534 			so = SCTP_INP_SO(stcb->sctp_ep);
1535 			atomic_add_int(&stcb->asoc.refcnt, 1);
1536 			SCTP_TCB_UNLOCK(stcb);
1537 			SCTP_SOCKET_LOCK(so, 1);
1538 			SCTP_TCB_LOCK(stcb);
1539 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1540 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1541 				/* assoc was freed while we were unlocked */
1542 				SCTP_SOCKET_UNLOCK(so, 1);
1543 				return (0);
1544 			}
1545 #endif
1546 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1547 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1548 			SCTP_SOCKET_UNLOCK(so, 1);
1549 #endif
1550 		}
1551 		/* now is it in the mapping array of what we have accepted? */
1552 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1553 		    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1554 			/* Nope, not in the valid range; dump it */
1555 			sctp_set_rwnd(stcb, asoc);
1556 			if ((asoc->cnt_on_all_streams +
1557 			    asoc->cnt_on_reasm_queue +
1558 			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1559 				SCTP_STAT_INCR(sctps_datadropchklmt);
1560 			} else {
1561 				SCTP_STAT_INCR(sctps_datadroprwnd);
1562 			}
1563 			*break_flag = 1;
1564 			return (0);
1565 		}
1566 	}
1567 	strmno = ntohs(ch->dp.stream_id);
1568 	if (strmno >= asoc->streamincnt) {
1569 		struct sctp_paramhdr *phdr;
1570 		struct mbuf *mb;
1571 
1572 		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1573 		    0, M_NOWAIT, 1, MT_DATA);
1574 		if (mb != NULL) {
1575 			/* add some space up front so prepend will work well */
1576 			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1577 			phdr = mtod(mb, struct sctp_paramhdr *);
1578 			/*
1579 			 * Error causes are just params, and this one has
1580 			 * two back-to-back phdrs: one with the error type
1581 			 * and size, the other with the stream id and a rsvd.
1582 			 */
1583 			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1584 			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1585 			phdr->param_length =
1586 			    htons(sizeof(struct sctp_paramhdr) * 2);
1587 			phdr++;
1588 			/* We insert the stream in the type field */
1589 			phdr->param_type = ch->dp.stream_id;
1590 			/* And set the length to 0 for the rsvd field */
1591 			phdr->param_length = 0;
1592 			sctp_queue_op_err(stcb, mb);
1593 		}
1594 		SCTP_STAT_INCR(sctps_badsid);
1595 		SCTP_TCB_LOCK_ASSERT(stcb);
1596 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1597 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1598 			asoc->highest_tsn_inside_nr_map = tsn;
1599 		}
1600 		if (tsn == (asoc->cumulative_tsn + 1)) {
1601 			/* Update cum-ack */
1602 			asoc->cumulative_tsn = tsn;
1603 		}
1604 		return (0);
1605 	}
1606 	/*
1607 	 * Before we continue, let's validate that we are not being fooled
1608 	 * by an evil attacker. We can only have 4k chunks, based on the
1609 	 * TSN spread allowed by the mapping array (512 * 8 bits), so there
1610 	 * is no way our stream sequence numbers could have wrapped. Of
1611 	 * course we only validate the FIRST fragment, so that bit must be set.
1612 	 */
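	/*
	 * Spelling that arithmetic out: the mapping array covers at most
	 * 512 * 8 = 4096 TSNs in flight, while stream sequence numbers are
	 * drawn from a 65536-value space, so an in-window SSN wrap cannot
	 * occur.
	 */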
1613 	strmseq = ntohs(ch->dp.stream_sequence);
1614 #ifdef SCTP_ASOCLOG_OF_TSNS
1615 	SCTP_TCB_LOCK_ASSERT(stcb);
1616 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1617 		asoc->tsn_in_at = 0;
1618 		asoc->tsn_in_wrapped = 1;
1619 	}
1620 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1621 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1622 	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1623 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1624 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1625 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1626 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1627 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1628 	asoc->tsn_in_at++;
1629 #endif
1630 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1631 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1632 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1633 	    SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1634 		/* The incoming sseq is behind where we last delivered? */
1635 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1636 		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1637 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1638 		    0, M_NOWAIT, 1, MT_DATA);
1639 		if (oper) {
1640 			struct sctp_paramhdr *ph;
1641 			uint32_t *ippp;
1642 
1643 			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1644 			    (3 * sizeof(uint32_t));
1645 			ph = mtod(oper, struct sctp_paramhdr *);
1646 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1647 			ph->param_length = htons(SCTP_BUF_LEN(oper));
1648 			ippp = (uint32_t *) (ph + 1);
1649 			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1650 			ippp++;
1651 			*ippp = tsn;
1652 			ippp++;
1653 			*ippp = ((strmno << 16) | strmseq);
1654 
1655 		}
1656 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1657 		sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1658 		*abort_flag = 1;
1659 		return (0);
1660 	}
1661 	/************************************
1662 	 * From here down we may find ch-> invalid
1663 	 * so it's a good idea NOT to use it.
1664 	 *************************************/
1665 
1666 	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1667 	if (last_chunk == 0) {
1668 		dmbuf = SCTP_M_COPYM(*m,
1669 		    (offset + sizeof(struct sctp_data_chunk)),
1670 		    the_len, M_NOWAIT);
1671 #ifdef SCTP_MBUF_LOGGING
1672 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1673 			struct mbuf *mat;
1674 
1675 			for (mat = dmbuf; mat; mat = SCTP_BUF_NEXT(mat)) {
1676 				if (SCTP_BUF_IS_EXTENDED(mat)) {
1677 					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1678 				}
1679 			}
1680 		}
1681 #endif
1682 	} else {
1683 		/* We can steal the last chunk */
1684 		int l_len;
1685 
1686 		dmbuf = *m;
1687 		/* lop off the top part */
1688 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1689 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1690 			l_len = SCTP_BUF_LEN(dmbuf);
1691 		} else {
1692 			/*
1693 			 * need to count up the size; hopefully we do not
1694 			 * hit this too often :-0
1695 			 */
1696 			struct mbuf *lat;
1697 
1698 			l_len = 0;
1699 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1700 				l_len += SCTP_BUF_LEN(lat);
1701 			}
1702 		}
1703 		if (l_len > the_len) {
1704 			/* Trim the rounding (padding) bytes off the end too */
1705 			m_adj(dmbuf, -(l_len - the_len));
1706 		}
1707 	}
1708 	if (dmbuf == NULL) {
1709 		SCTP_STAT_INCR(sctps_nomem);
1710 		return (0);
1711 	}
1712 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1713 	    asoc->fragmented_delivery_inprogress == 0 &&
1714 	    TAILQ_EMPTY(&asoc->resetHead) &&
1715 	    ((ordered == 0) ||
1716 	    ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1717 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1718 		/* Candidate for express delivery */
1719 		/*
1720 		 * It's not fragmented, no PD-API is up, nothing is in the
1721 		 * delivery queue, it's un-ordered OR ordered and the next
1722 		 * to deliver AND nothing else is stuck on the stream queue,
1723 		 * and there is room for it in the socket buffer. Let's just
1724 		 * stuff it up the buffer....
1725 		 */
1726 
1727 		/* It would be nice to avoid this copy if we could :< */
1728 		sctp_alloc_a_readq(stcb, control);
1729 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1730 		    protocol_id,
1731 		    strmno, strmseq,
1732 		    chunk_flags,
1733 		    dmbuf);
1734 		if (control == NULL) {
1735 			goto failed_express_del;
1736 		}
1737 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1738 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1739 			asoc->highest_tsn_inside_nr_map = tsn;
1740 		}
1741 		sctp_add_to_readq(stcb->sctp_ep, stcb,
1742 		    control, &stcb->sctp_socket->so_rcv,
1743 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1744 
1745 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1746 			/* for ordered, bump what we delivered */
1747 			asoc->strmin[strmno].last_sequence_delivered++;
1748 		}
1749 		SCTP_STAT_INCR(sctps_recvexpress);
1750 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1751 			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1752 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1753 		}
1754 		control = NULL;
1755 
1756 		goto finish_express_del;
1757 	}
1758 failed_express_del:
1759 	/* If we reach here this is a new chunk */
1760 	chk = NULL;
1761 	control = NULL;
1762 	/* Express for fragmented delivery? */
1763 	if ((asoc->fragmented_delivery_inprogress) &&
1764 	    (stcb->asoc.control_pdapi) &&
1765 	    (asoc->str_of_pdapi == strmno) &&
1766 	    (asoc->ssn_of_pdapi == strmseq)
1767 	    ) {
1768 		control = stcb->asoc.control_pdapi;
1769 		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1770 			/* Can't be another first? */
1771 			goto failed_pdapi_express_del;
1772 		}
1773 		if (tsn == (control->sinfo_tsn + 1)) {
1774 			/* Yep, we can add it on */
1775 			int end = 0;
1776 
1777 			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1778 				end = 1;
1779 			}
1780 			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1781 			    tsn,
1782 			    &stcb->sctp_socket->so_rcv)) {
1783 				SCTP_PRINTF("Append fails end:%d\n", end);
1784 				goto failed_pdapi_express_del;
1785 			}
1786 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1787 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1788 				asoc->highest_tsn_inside_nr_map = tsn;
1789 			}
1790 			SCTP_STAT_INCR(sctps_recvexpressm);
1791 			control->sinfo_tsn = tsn;
1792 			asoc->tsn_last_delivered = tsn;
1793 			asoc->fragment_flags = chunk_flags;
1794 			asoc->tsn_of_pdapi_last_delivered = tsn;
1795 			asoc->last_flags_delivered = chunk_flags;
1796 			asoc->last_strm_seq_delivered = strmseq;
1797 			asoc->last_strm_no_delivered = strmno;
1798 			if (end) {
1799 				/* clean up the flags and such */
1800 				asoc->fragmented_delivery_inprogress = 0;
1801 				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1802 					asoc->strmin[strmno].last_sequence_delivered++;
1803 				}
1804 				stcb->asoc.control_pdapi = NULL;
1805 				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1806 					/*
1807 					 * There could be another message
1808 					 * ready
1809 					 */
1810 					need_reasm_check = 1;
1811 				}
1812 			}
1813 			control = NULL;
1814 			goto finish_express_del;
1815 		}
1816 	}
1817 failed_pdapi_express_del:
1818 	control = NULL;
1819 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1820 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1821 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1822 			asoc->highest_tsn_inside_nr_map = tsn;
1823 		}
1824 	} else {
1825 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1826 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1827 			asoc->highest_tsn_inside_map = tsn;
1828 		}
1829 	}
1830 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1831 		sctp_alloc_a_chunk(stcb, chk);
1832 		if (chk == NULL) {
1833 			/* No memory so we drop the chunk */
1834 			SCTP_STAT_INCR(sctps_nomem);
1835 			if (last_chunk == 0) {
1836 				/* we copied it, free the copy */
1837 				sctp_m_freem(dmbuf);
1838 			}
1839 			return (0);
1840 		}
1841 		chk->rec.data.TSN_seq = tsn;
1842 		chk->no_fr_allowed = 0;
1843 		chk->rec.data.stream_seq = strmseq;
1844 		chk->rec.data.stream_number = strmno;
1845 		chk->rec.data.payloadtype = protocol_id;
1846 		chk->rec.data.context = stcb->asoc.context;
1847 		chk->rec.data.doing_fast_retransmit = 0;
1848 		chk->rec.data.rcv_flags = chunk_flags;
1849 		chk->asoc = asoc;
1850 		chk->send_size = the_len;
1851 		chk->whoTo = net;
1852 		atomic_add_int(&net->ref_count, 1);
1853 		chk->data = dmbuf;
1854 	} else {
1855 		sctp_alloc_a_readq(stcb, control);
1856 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1857 		    protocol_id,
1858 		    strmno, strmseq,
1859 		    chunk_flags,
1860 		    dmbuf);
1861 		if (control == NULL) {
1862 			/* No memory so we drop the chunk */
1863 			SCTP_STAT_INCR(sctps_nomem);
1864 			if (last_chunk == 0) {
1865 				/* we copied it, free the copy */
1866 				sctp_m_freem(dmbuf);
1867 			}
1868 			return (0);
1869 		}
1870 		control->length = the_len;
1871 	}
1872 
1873 	/* Mark it as received */
1874 	/* Now queue it where it belongs */
1875 	if (control != NULL) {
1876 		/* First a sanity check */
1877 		if (asoc->fragmented_delivery_inprogress) {
1878 			/*
1879 			 * Ok, we have a fragmented delivery in progress. If
1880 			 * this chunk is next to deliver OR belongs, in our
1881 			 * view, in the reassembly queue, the peer is evil
1882 			 * or broken.
1883 			 */
1884 			uint32_t estimate_tsn;
1885 
1886 			estimate_tsn = asoc->tsn_last_delivered + 1;
1887 			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1888 			    (estimate_tsn == control->sinfo_tsn)) {
1889 				/* Evil/Broken peer */
1890 				sctp_m_freem(control->data);
1891 				control->data = NULL;
1892 				if (control->whoFrom) {
1893 					sctp_free_remote_addr(control->whoFrom);
1894 					control->whoFrom = NULL;
1895 				}
1896 				sctp_free_a_readq(stcb, control);
1897 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1898 				    0, M_NOWAIT, 1, MT_DATA);
1899 				if (oper) {
1900 					struct sctp_paramhdr *ph;
1901 					uint32_t *ippp;
1902 
1903 					SCTP_BUF_LEN(oper) =
1904 					    sizeof(struct sctp_paramhdr) +
1905 					    (3 * sizeof(uint32_t));
1906 					ph = mtod(oper, struct sctp_paramhdr *);
1907 					ph->param_type =
1908 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1909 					ph->param_length = htons(SCTP_BUF_LEN(oper));
1910 					ippp = (uint32_t *) (ph + 1);
1911 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1912 					ippp++;
1913 					*ippp = tsn;
1914 					ippp++;
1915 					*ippp = ((strmno << 16) | strmseq);
1916 				}
1917 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1918 				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1919 				*abort_flag = 1;
1920 				return (0);
1921 			} else {
1922 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1923 					sctp_m_freem(control->data);
1924 					control->data = NULL;
1925 					if (control->whoFrom) {
1926 						sctp_free_remote_addr(control->whoFrom);
1927 						control->whoFrom = NULL;
1928 					}
1929 					sctp_free_a_readq(stcb, control);
1930 
1931 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1932 					    0, M_NOWAIT, 1, MT_DATA);
1933 					if (oper) {
1934 						struct sctp_paramhdr *ph;
1935 						uint32_t *ippp;
1936 
1937 						SCTP_BUF_LEN(oper) =
1938 						    sizeof(struct sctp_paramhdr) +
1939 						    (3 * sizeof(uint32_t));
1940 						ph = mtod(oper,
1941 						    struct sctp_paramhdr *);
1942 						ph->param_type =
1943 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1944 						ph->param_length =
1945 						    htons(SCTP_BUF_LEN(oper));
1946 						ippp = (uint32_t *) (ph + 1);
1947 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1948 						ippp++;
1949 						*ippp = tsn;
1950 						ippp++;
1951 						*ippp = ((strmno << 16) | strmseq);
1952 					}
1953 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1954 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1955 					*abort_flag = 1;
1956 					return (0);
1957 				}
1958 			}
1959 		} else {
1960 			/* No PDAPI running */
1961 			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1962 				/*
1963 				 * The reassembly queue is NOT empty; validate
1964 				 * that this TSN does not belong on the
1965 				 * reassembly queue. If it does, then our peer
1966 				 * is broken or evil.
1967 				 */
1968 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1969 					sctp_m_freem(control->data);
1970 					control->data = NULL;
1971 					if (control->whoFrom) {
1972 						sctp_free_remote_addr(control->whoFrom);
1973 						control->whoFrom = NULL;
1974 					}
1975 					sctp_free_a_readq(stcb, control);
1976 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1977 					    0, M_NOWAIT, 1, MT_DATA);
1978 					if (oper) {
1979 						struct sctp_paramhdr *ph;
1980 						uint32_t *ippp;
1981 
1982 						SCTP_BUF_LEN(oper) =
1983 						    sizeof(struct sctp_paramhdr) +
1984 						    (3 * sizeof(uint32_t));
1985 						ph = mtod(oper,
1986 						    struct sctp_paramhdr *);
1987 						ph->param_type =
1988 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1989 						ph->param_length =
1990 						    htons(SCTP_BUF_LEN(oper));
1991 						ippp = (uint32_t *) (ph + 1);
1992 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
1993 						ippp++;
1994 						*ippp = tsn;
1995 						ippp++;
1996 						*ippp = ((strmno << 16) | strmseq);
1997 					}
1998 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1999 					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
2000 					*abort_flag = 1;
2001 					return (0);
2002 				}
2003 			}
2004 		}
2005 		/* ok, if we reach here we have passed the sanity checks */
2006 		if (chunk_flags & SCTP_DATA_UNORDERED) {
2007 			/* queue directly into socket buffer */
2008 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2009 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2010 			    control,
2011 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2012 		} else {
2013 			/*
2014 			 * Special check for when streams are resetting. We
2015 			 * could be smarter about this and check the actual
2016 			 * stream to see if it is not being reset... that way
2017 			 * we would not create head-of-line blocking between
2018 			 * streams being reset and those not being reset.
2019 			 *
2020 			 * We take complete messages that have a stream reset
2021 			 * intervening (aka the TSN is after where our
2022 			 * cum-ack needs to be) off and put them on the
2023 			 * pending_reply_queue. The reassembly ones we do not
2024 			 * have to worry about since they are all sorted and
2025 			 * processed in TSN order. It is only the singletons
2026 			 * I must worry about.
2027 			 */
2028 			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2029 			    SCTP_TSN_GT(tsn, liste->tsn)) {
2030 				/*
2031 				 * yep, it's past where we need to reset... go
2032 				 * ahead and queue it.
2033 				 */
2034 				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2035 					/* first one on */
2036 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2037 				} else {
2038 					struct sctp_queued_to_read *ctlOn,
2039 					                   *nctlOn;
2040 					unsigned char inserted = 0;
2041 
2042 					TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2043 						if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2044 							continue;
2045 						} else {
2046 							/* found it */
2047 							TAILQ_INSERT_BEFORE(ctlOn, control, next);
2048 							inserted = 1;
2049 							break;
2050 						}
2051 					}
2052 					if (inserted == 0) {
2053 						/*
2054 						 * not inserted before any
2055 						 * entry, so it must go at
2056 						 * the end of the queue.
2057 						 */
2058 						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2059 					}
2060 				}
2061 			} else {
2062 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2063 				if (*abort_flag) {
2064 					return (0);
2065 				}
2066 			}
2067 		}
2068 	} else {
2069 		/* Into the re-assembly queue */
2070 		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2071 		if (*abort_flag) {
2072 			/*
2073 			 * the assoc is now gone; chk was put onto the
2074 			 * reasm queue, and everything there has been freed.
2075 			 */
2076 			*m = NULL;
2077 			return (0);
2078 		}
2079 	}
2080 finish_express_del:
2081 	if (tsn == (asoc->cumulative_tsn + 1)) {
2082 		/* Update cum-ack */
2083 		asoc->cumulative_tsn = tsn;
2084 	}
2085 	if (last_chunk) {
2086 		*m = NULL;
2087 	}
2088 	if (ordered) {
2089 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2090 	} else {
2091 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2092 	}
2093 	SCTP_STAT_INCR(sctps_recvdata);
2094 	/* Set it present please */
2095 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2096 		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2097 	}
2098 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2099 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2100 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2101 	}
2102 	/* check the special flag for stream resets */
2103 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2104 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2105 		/*
2106 		 * We have finished working through the backlogged TSNs; now
2107 		 * it is time to reset streams: 1) call the reset function,
2108 		 * 2) free the pending_reply space, 3) distribute any chunks
2109 		 * on the pending_reply_queue.
2110 		 */
2111 		struct sctp_queued_to_read *ctl, *nctl;
2112 
2113 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2114 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2115 		SCTP_FREE(liste, SCTP_M_STRESET);
2116 		/* sa_ignore FREED_MEMORY */
2117 		liste = TAILQ_FIRST(&asoc->resetHead);
2118 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2119 			/* All can be removed */
2120 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2121 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2122 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2123 				if (*abort_flag) {
2124 					return (0);
2125 				}
2126 			}
2127 		} else {
2128 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2129 				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2130 					break;
2131 				}
2132 				/*
2133 				 * If ctl->sinfo_tsn is <= liste->tsn we can
2134 				 * process it, which is the negation of the
2135 				 * ctl->sinfo_tsn > liste->tsn test above.
2136 				 */
2137 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2138 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2139 				if (*abort_flag) {
2140 					return (0);
2141 				}
2142 			}
2143 		}
2144 		/*
2145 		 * Now service reassembly to pick up anything that has been
2146 		 * held on the reassembly queue.
2147 		 */
2148 		sctp_deliver_reasm_check(stcb, asoc);
2149 		need_reasm_check = 0;
2150 	}
2151 	if (need_reasm_check) {
2152 		/* Another message may be waiting */
2153 		sctp_deliver_reasm_check(stcb, asoc);
2154 	}
2155 	return (1);
2156 }
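
/*
 * A minimal sketch (not from the original source) of the serial-number
 * arithmetic behind SCTP_CALC_TSN_TO_GAP, as used above: the gap is the
 * modulo-2^32 distance of a TSN from the base of the mapping array,
 * which unsigned subtraction yields directly, wrap-around included.
 */
#if 0
static uint32_t
example_tsn_to_gap(uint32_t tsn, uint32_t base)
{
	/* e.g. base = 0xfffffffe, tsn = 0x00000001 -> gap = 3 */
	return (tsn - base);
}
#endif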
2157 
2158 int8_t sctp_map_lookup_tab[256] = {
2159 	0, 1, 0, 2, 0, 1, 0, 3,
2160 	0, 1, 0, 2, 0, 1, 0, 4,
2161 	0, 1, 0, 2, 0, 1, 0, 3,
2162 	0, 1, 0, 2, 0, 1, 0, 5,
2163 	0, 1, 0, 2, 0, 1, 0, 3,
2164 	0, 1, 0, 2, 0, 1, 0, 4,
2165 	0, 1, 0, 2, 0, 1, 0, 3,
2166 	0, 1, 0, 2, 0, 1, 0, 6,
2167 	0, 1, 0, 2, 0, 1, 0, 3,
2168 	0, 1, 0, 2, 0, 1, 0, 4,
2169 	0, 1, 0, 2, 0, 1, 0, 3,
2170 	0, 1, 0, 2, 0, 1, 0, 5,
2171 	0, 1, 0, 2, 0, 1, 0, 3,
2172 	0, 1, 0, 2, 0, 1, 0, 4,
2173 	0, 1, 0, 2, 0, 1, 0, 3,
2174 	0, 1, 0, 2, 0, 1, 0, 7,
2175 	0, 1, 0, 2, 0, 1, 0, 3,
2176 	0, 1, 0, 2, 0, 1, 0, 4,
2177 	0, 1, 0, 2, 0, 1, 0, 3,
2178 	0, 1, 0, 2, 0, 1, 0, 5,
2179 	0, 1, 0, 2, 0, 1, 0, 3,
2180 	0, 1, 0, 2, 0, 1, 0, 4,
2181 	0, 1, 0, 2, 0, 1, 0, 3,
2182 	0, 1, 0, 2, 0, 1, 0, 6,
2183 	0, 1, 0, 2, 0, 1, 0, 3,
2184 	0, 1, 0, 2, 0, 1, 0, 4,
2185 	0, 1, 0, 2, 0, 1, 0, 3,
2186 	0, 1, 0, 2, 0, 1, 0, 5,
2187 	0, 1, 0, 2, 0, 1, 0, 3,
2188 	0, 1, 0, 2, 0, 1, 0, 4,
2189 	0, 1, 0, 2, 0, 1, 0, 3,
2190 	0, 1, 0, 2, 0, 1, 0, 8
2191 };
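
/*
 * What the table above encodes, shown as an equivalent computation (this
 * helper is illustrative and not part of the original source):
 * sctp_map_lookup_tab[val] is the index of the lowest clear bit in val,
 * i.e. the number of consecutive 1-bits starting at bit 0. The slide
 * logic below uses it to find the first missing TSN inside a partially
 * filled mapping byte.
 */
#if 0
static int
example_lowest_clear_bit(uint8_t val)
{
	int n = 0;

	while ((n < 8) && (val & (1 << n))) {
		n++;
	}
	return (n);	/* matches sctp_map_lookup_tab[val] */
}
#endif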
2192 
2193 
2194 void
2195 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2196 {
2197 	/*
2198 	 * Now we also need to check the mapping array in a couple of ways.
2199 	 * 1) Did we move the cum-ack point?
2200 	 *
2201 	 * When you first glance at this you might think that all entries
2202 	 * that make up the position of the cum-ack would be in the
2203 	 * nr-mapping array only... i.e. things up to the cum-ack are
2204 	 * always deliverable. That's true with one exception: when it's a
2205 	 * fragmented message we may not deliver the data until some
2206 	 * threshold (or all of it) is in place. So we must OR the
2207 	 * nr_mapping_array and mapping_array to get a true cum-ack picture.
2208 	 */
2209 	struct sctp_association *asoc;
2210 	int at;
2211 	uint8_t val;
2212 	int slide_from, slide_end, lgap, distance;
2213 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2214 
2215 	asoc = &stcb->asoc;
2216 
2217 	old_cumack = asoc->cumulative_tsn;
2218 	old_base = asoc->mapping_array_base_tsn;
2219 	old_highest = asoc->highest_tsn_inside_map;
2220 	/*
2221 	 * We could probably improve this a small bit by calculating the
2222 	 * offset of the current cum-ack as the starting point.
2223 	 */
2224 	at = 0;
2225 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2226 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2227 		if (val == 0xff) {
2228 			at += 8;
2229 		} else {
2230 			/* there is a 0 bit */
2231 			at += sctp_map_lookup_tab[val];
2232 			break;
2233 		}
2234 	}
2235 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2236 
2237 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2238 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2239 #ifdef INVARIANTS
2240 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2241 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2242 #else
2243 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2244 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2245 		sctp_print_mapping_array(asoc);
2246 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2247 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2248 		}
2249 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2250 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2251 #endif
2252 	}
2253 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2254 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2255 	} else {
2256 		highest_tsn = asoc->highest_tsn_inside_map;
2257 	}
2258 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2259 		/* The complete array was completed by a single FR */
2260 		/* highest becomes the cum-ack */
2261 		int clr;
2262 
2263 #ifdef INVARIANTS
2264 		unsigned int i;
2265 
2266 #endif
2267 
2268 		/* clear the array */
2269 		clr = ((at + 7) >> 3);
2270 		if (clr > asoc->mapping_array_size) {
2271 			clr = asoc->mapping_array_size;
2272 		}
2273 		memset(asoc->mapping_array, 0, clr);
2274 		memset(asoc->nr_mapping_array, 0, clr);
2275 #ifdef INVARIANTS
2276 		for (i = 0; i < asoc->mapping_array_size; i++) {
2277 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2278 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2279 				sctp_print_mapping_array(asoc);
2280 			}
2281 		}
2282 #endif
2283 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2284 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2285 	} else if (at >= 8) {
2286 		/* we can slide the mapping array down */
2287 		/* slide_from holds where we hit the first NON 0xff byte */
2288 
2289 		/*
2290 		 * now calculate the ceiling of the move using our highest
2291 		 * TSN value
2292 		 */
2293 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2294 		slide_end = (lgap >> 3);
2295 		if (slide_end < slide_from) {
2296 			sctp_print_mapping_array(asoc);
2297 #ifdef INVARIANTS
2298 			panic("impossible slide");
2299 #else
2300 			SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2301 			    lgap, slide_end, slide_from, at);
2302 			return;
2303 #endif
2304 		}
2305 		if (slide_end > asoc->mapping_array_size) {
2306 #ifdef INVARIANTS
2307 			panic("would overrun buffer");
2308 #else
2309 			SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2310 			    asoc->mapping_array_size, slide_end);
2311 			slide_end = asoc->mapping_array_size;
2312 #endif
2313 		}
2314 		distance = (slide_end - slide_from) + 1;
2315 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2316 			sctp_log_map(old_base, old_cumack, old_highest,
2317 			    SCTP_MAP_PREPARE_SLIDE);
2318 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2319 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2320 		}
2321 		if (distance + slide_from > asoc->mapping_array_size ||
2322 		    distance < 0) {
2323 			/*
2324 			 * Here we do NOT slide forward the array so that
2325 			 * hopefully when more data comes in to fill it up
2326 			 * we will be able to slide it forward. Really I
2327 			 * don't think this should happen :-0
2328 			 */
2329 
2330 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2331 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2332 				    (uint32_t) asoc->mapping_array_size,
2333 				    SCTP_MAP_SLIDE_NONE);
2334 			}
2335 		} else {
2336 			int ii;
2337 
2338 			for (ii = 0; ii < distance; ii++) {
2339 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2340 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2341 
2342 			}
2343 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2344 				asoc->mapping_array[ii] = 0;
2345 				asoc->nr_mapping_array[ii] = 0;
2346 			}
2347 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2348 				asoc->highest_tsn_inside_map += (slide_from << 3);
2349 			}
2350 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2351 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2352 			}
2353 			asoc->mapping_array_base_tsn += (slide_from << 3);
2354 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2355 				sctp_log_map(asoc->mapping_array_base_tsn,
2356 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2357 				    SCTP_MAP_SLIDE_RESULT);
2358 			}
2359 		}
2360 	}
2361 }
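
/*
 * A worked example of the slide above (values are hypothetical): if the
 * first four bytes of the ORed maps are 0xff and the fifth is not,
 * slide_from becomes 4, the arrays shift down by four bytes, and
 * mapping_array_base_tsn advances by 4 << 3 = 32 TSNs, so the bits for
 * still-missing TSNs keep their positions relative to the new base.
 */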
2362 
2363 void
2364 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2365 {
2366 	struct sctp_association *asoc;
2367 	uint32_t highest_tsn;
2368 
2369 	asoc = &stcb->asoc;
2370 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2371 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2372 	} else {
2373 		highest_tsn = asoc->highest_tsn_inside_map;
2374 	}
2375 
2376 	/*
2377 	 * Now we need to see if we need to queue a sack or just start the
2378 	 * timer (if allowed).
2379 	 */
2380 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2381 		/*
2382 		 * Ok, special case: in the SHUTDOWN-SENT state we make sure
2383 		 * the SACK timer is off and instead send a SHUTDOWN and a
2384 		 * SACK.
2385 		 */
2386 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2387 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2388 			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2389 		}
2390 		sctp_send_shutdown(stcb,
2391 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2392 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2393 	} else {
2394 		int is_a_gap;
2395 
2396 		/* is there a gap now? */
2397 		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2398 
2399 		/*
2400 		 * CMT DAC algorithm: increase number of packets received
2401 		 * since last ack
2402 		 */
2403 		stcb->asoc.cmt_dac_pkts_rcvd++;
2404 
2405 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2406 							 * SACK */
2407 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2408 							 * longer is one */
2409 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2410 		    (is_a_gap) ||	/* is still a gap */
2411 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2412 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2413 		    ) {
2414 
2415 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2416 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2417 			    (stcb->asoc.send_sack == 0) &&
2418 			    (stcb->asoc.numduptsns == 0) &&
2419 			    (stcb->asoc.delayed_ack) &&
2420 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2421 
2422 				/*
2423 				 * CMT DAC algorithm: with CMT, delay acks
2424 				 * even in the face of reordering.
2425 				 *
2426 				 * Therefore, acks that do not have to be
2427 				 * sent for the above reasons will be delayed.
2428 				 * That is, acks that would have been sent
2429 				 * due to gap reports will be delayed with
2430 				 * DAC. Start the delayed ack timer.
2432 				 */
2433 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2434 				    stcb->sctp_ep, stcb, NULL);
2435 			} else {
2436 				/*
2437 				 * Ok, we must build a SACK since the timer
2438 				 * is pending, we got our first packet, OR
2439 				 * there are gaps or duplicates.
2440 				 */
2441 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2442 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2443 			}
2444 		} else {
2445 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2446 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2447 				    stcb->sctp_ep, stcb, NULL);
2448 			}
2449 		}
2450 	}
2451 }
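
/*
 * An illustrative note on the packet-count trigger above (the default
 * is an assumption, not taken from this file): with sack_freq at its
 * usual default of 2, data_pkts_seen reaches the limit on every second
 * packet carrying DATA, so a SACK goes out at least once per two such
 * packets even when no gaps, duplicates, or explicit requests force
 * one sooner.
 */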
2452 
2453 void
2454 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2455 {
2456 	struct sctp_tmit_chunk *chk;
2457 	uint32_t tsize, pd_point;
2458 	uint16_t nxt_todel;
2459 
2460 	if (asoc->fragmented_delivery_inprogress) {
2461 		sctp_service_reassembly(stcb, asoc);
2462 	}
2463 	/* Can we proceed further, i.e. is the PD-API complete? */
2464 	if (asoc->fragmented_delivery_inprogress) {
2465 		/* no */
2466 		return;
2467 	}
2468 	/*
2469 	 * Now, is there some other chunk I can deliver from the reassembly
2470 	 * queue?
2471 	 */
2472 doit_again:
2473 	chk = TAILQ_FIRST(&asoc->reasmqueue);
2474 	if (chk == NULL) {
2475 		asoc->size_on_reasm_queue = 0;
2476 		asoc->cnt_on_reasm_queue = 0;
2477 		return;
2478 	}
2479 	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2480 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2481 	    ((nxt_todel == chk->rec.data.stream_seq) ||
2482 	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2483 		/*
2484 		 * Yep, the first one is here. We set up to start reception
2485 		 * by backing down the TSN, just in case we can't deliver.
2486 		 */
2487 
2488 		/*
2489 		 * Before we start, though, either all of the message should
2490 		 * be here, or at least the partial delivery point worth of
2491 		 * it, so that something can actually be delivered.
2492 		 */
2493 		if (stcb->sctp_socket) {
2494 			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
2495 			    stcb->sctp_ep->partial_delivery_point);
2496 		} else {
2497 			pd_point = stcb->sctp_ep->partial_delivery_point;
2498 		}
2499 		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2500 			asoc->fragmented_delivery_inprogress = 1;
2501 			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2502 			asoc->str_of_pdapi = chk->rec.data.stream_number;
2503 			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2504 			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2505 			asoc->fragment_flags = chk->rec.data.rcv_flags;
2506 			sctp_service_reassembly(stcb, asoc);
2507 			if (asoc->fragmented_delivery_inprogress == 0) {
2508 				goto doit_again;
2509 			}
2510 		}
2511 	}
2512 }
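
/*
 * A worked example of the pd_point test above (numbers are
 * hypothetical): with a 64 KB receive buffer and a 32 KB
 * partial_delivery_point, pd_point is 32 KB. A 100 KB message then
 * starts its PD-API delivery once 32 KB of fragments are queued, while
 * a 1 KB message is delivered only when all of it has arrived.
 */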
2513 
2514 int
2515 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2516     struct sockaddr *src, struct sockaddr *dst,
2517     struct sctphdr *sh, struct sctp_inpcb *inp,
2518     struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t * high_tsn,
2519     uint8_t use_mflowid, uint32_t mflowid,
2520     uint32_t vrf_id, uint16_t port)
2521 {
2522 	struct sctp_data_chunk *ch, chunk_buf;
2523 	struct sctp_association *asoc;
2524 	int num_chunks = 0;	/* number of control chunks processed */
2525 	int stop_proc = 0;
2526 	int chk_length, break_flag, last_chunk;
2527 	int abort_flag = 0, was_a_gap;
2528 	struct mbuf *m;
2529 	uint32_t highest_tsn;
2530 
2531 	/* set the rwnd */
2532 	sctp_set_rwnd(stcb, &stcb->asoc);
2533 
2534 	m = *mm;
2535 	SCTP_TCB_LOCK_ASSERT(stcb);
2536 	asoc = &stcb->asoc;
2537 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2538 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2539 	} else {
2540 		highest_tsn = asoc->highest_tsn_inside_map;
2541 	}
2542 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2543 	/*
2544 	 * setup where we got the last DATA packet from for any SACK that
2545 	 * may need to go out. Don't bump the net. This is done ONLY when a
2546 	 * chunk is assigned.
2547 	 */
2548 	asoc->last_data_chunk_from = net;
2549 
2550 	/*-
2551 	 * Now before we proceed we must figure out if this is a wasted
2552 	 * cluster... i.e. it is a small packet sent in and yet the driver
2553 	 * underneath allocated a full cluster for it. If so we must copy it
2554 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2555 	 * with cluster starvation. Note for __Panda__ we don't do this
2556 	 * since it has clusters all the way down to 64 bytes.
2557 	 */
2558 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2559 		/* we only handle mbufs that are singletons.. not chains */
2560 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2561 		if (m) {
2562 			/* ok, let's see if we can copy the data up */
2563 			caddr_t *from, *to;
2564 
2565 			/* get the pointers and copy */
2566 			to = mtod(m, caddr_t *);
2567 			from = mtod((*mm), caddr_t *);
2568 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2569 			/* copy the length and free up the old */
2570 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2571 			sctp_m_freem(*mm);
2572 			/* success, hand back the copy */
2573 			*mm = m;
2574 		} else {
2575 			/* We are in trouble in the mbuf world .. yikes */
2576 			m = *mm;
2577 		}
2578 	}
2579 	/* get pointer to the first chunk header */
2580 	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2581 	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2582 	if (ch == NULL) {
2583 		return (1);
2584 	}
2585 	/*
2586 	 * process all DATA chunks...
2587 	 */
2588 	*high_tsn = asoc->cumulative_tsn;
2589 	break_flag = 0;
2590 	asoc->data_pkts_seen++;
2591 	while (stop_proc == 0) {
2592 		/* validate chunk length */
2593 		chk_length = ntohs(ch->ch.chunk_length);
2594 		if (length - *offset < chk_length) {
2595 			/* all done, mutulated chunk */
2596 			stop_proc = 1;
2597 			continue;
2598 		}
2599 		if (ch->ch.chunk_type == SCTP_DATA) {
2600 			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2601 				/*
2602 				 * Need to send an abort since we had an
2603 				 * invalid data chunk.
2604 				 */
2605 				struct mbuf *op_err;
2606 
2607 				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2608 				    0, M_NOWAIT, 1, MT_DATA);
2609 
2610 				if (op_err) {
2611 					struct sctp_paramhdr *ph;
2612 					uint32_t *ippp;
2613 
2614 					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2615 					    (2 * sizeof(uint32_t));
2616 					ph = mtod(op_err, struct sctp_paramhdr *);
2617 					ph->param_type =
2618 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2619 					ph->param_length = htons(SCTP_BUF_LEN(op_err));
2620 					ippp = (uint32_t *) (ph + 1);
2621 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2622 					ippp++;
2623 					*ippp = asoc->cumulative_tsn;
2624 
2625 				}
2626 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2627 				sctp_abort_association(inp, stcb, m, iphlen,
2628 				    src, dst, sh, op_err,
2629 				    use_mflowid, mflowid,
2630 				    vrf_id, port);
2631 				return (2);
2632 			}
2633 #ifdef SCTP_AUDITING_ENABLED
2634 			sctp_audit_log(0xB1, 0);
2635 #endif
2636 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2637 				last_chunk = 1;
2638 			} else {
2639 				last_chunk = 0;
2640 			}
2641 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2642 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2643 			    last_chunk)) {
2644 				num_chunks++;
2645 			}
2646 			if (abort_flag)
2647 				return (2);
2648 
2649 			if (break_flag) {
2650 				/*
2651 				 * Set because we are out of rwnd space and
2652 				 * have no drop report space left.
2653 				 */
2654 				stop_proc = 1;
2655 				continue;
2656 			}
2657 		} else {
2658 			/* not a data chunk in the data region */
2659 			switch (ch->ch.chunk_type) {
2660 			case SCTP_INITIATION:
2661 			case SCTP_INITIATION_ACK:
2662 			case SCTP_SELECTIVE_ACK:
2663 			case SCTP_NR_SELECTIVE_ACK:
2664 			case SCTP_HEARTBEAT_REQUEST:
2665 			case SCTP_HEARTBEAT_ACK:
2666 			case SCTP_ABORT_ASSOCIATION:
2667 			case SCTP_SHUTDOWN:
2668 			case SCTP_SHUTDOWN_ACK:
2669 			case SCTP_OPERATION_ERROR:
2670 			case SCTP_COOKIE_ECHO:
2671 			case SCTP_COOKIE_ACK:
2672 			case SCTP_ECN_ECHO:
2673 			case SCTP_ECN_CWR:
2674 			case SCTP_SHUTDOWN_COMPLETE:
2675 			case SCTP_AUTHENTICATION:
2676 			case SCTP_ASCONF_ACK:
2677 			case SCTP_PACKET_DROPPED:
2678 			case SCTP_STREAM_RESET:
2679 			case SCTP_FORWARD_CUM_TSN:
2680 			case SCTP_ASCONF:
2681 				/*
2682 				 * Now, what do we do with KNOWN chunks that
2683 				 * are NOT in the right place?
2684 				 *
2685 				 * For now, I do nothing but ignore them. We
2686 				 * may later want to add sysctl stuff to
2687 				 * switch out and do either an ABORT() or
2688 				 * possibly process them.
2689 				 */
2690 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2691 					struct mbuf *op_err;
2692 
2693 					op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2694 					sctp_abort_association(inp, stcb,
2695 					    m, iphlen,
2696 					    src, dst,
2697 					    sh, op_err,
2698 					    use_mflowid, mflowid,
2699 					    vrf_id, port);
2700 					return (2);
2701 				}
2702 				break;
2703 			default:
2704 				/* unknown chunk type, use bit rules */
2705 				if (ch->ch.chunk_type & 0x40) {
2706 					/* Add a error report to the queue */
2707 					struct mbuf *merr;
2708 					struct sctp_paramhdr *phd;
2709 
2710 					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_NOWAIT, 1, MT_DATA);
2711 					if (merr) {
2712 						phd = mtod(merr, struct sctp_paramhdr *);
2713 						/*
2714 						 * We cheat and use param
2715 						 * type since we did not
2716 						 * bother to define an error
2717 						 * cause struct. They are
2718 						 * the same basic format
2719 						 * with different names.
2720 						 */
2721 						phd->param_type =
2722 						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
2723 						phd->param_length =
2724 						    htons(chk_length + sizeof(*phd));
2725 						SCTP_BUF_LEN(merr) = sizeof(*phd);
2726 						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2727 						if (SCTP_BUF_NEXT(merr)) {
2728 							if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL)) {
2729 								sctp_m_freem(merr);
2730 							} else {
2731 								sctp_queue_op_err(stcb, merr);
2732 							}
2733 						} else {
2734 							sctp_m_freem(merr);
2735 						}
2736 					}
2737 				}
2738 				if ((ch->ch.chunk_type & 0x80) == 0) {
2739 					/* discard the rest of this packet */
2740 					stop_proc = 1;
2741 				}	/* else skip this bad chunk and
2742 					 * continue... */
2743 				break;
2744 			}	/* switch of chunk type */
2745 		}
2746 		*offset += SCTP_SIZE32(chk_length);
2747 		if ((*offset >= length) || stop_proc) {
2748 			/* no more data left in the mbuf chain */
2749 			stop_proc = 1;
2750 			continue;
2751 		}
2752 		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2753 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2754 		if (ch == NULL) {
2755 			*offset = length;
2756 			stop_proc = 1;
2757 			continue;
2758 		}
2759 	}
2760 	if (break_flag) {
2761 		/*
2762 		 * we need to report rwnd overrun drops.
2763 		 */
2764 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2765 	}
2766 	if (num_chunks) {
2767 		/*
2768 		 * Did we get data? If so, update the time for auto-close
2769 		 * and give the peer credit for being alive.
2770 		 */
2771 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2772 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2773 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2774 			    stcb->asoc.overall_error_count,
2775 			    0,
2776 			    SCTP_FROM_SCTP_INDATA,
2777 			    __LINE__);
2778 		}
2779 		stcb->asoc.overall_error_count = 0;
2780 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2781 	}
2782 	/* now service all of the reassm queue if needed */
2783 	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2784 		sctp_service_queues(stcb, asoc);
2785 
2786 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2787 		/* Assure that we ack right away */
2788 		stcb->asoc.send_sack = 1;
2789 	}
2790 	/* Start a sack timer or QUEUE a SACK for sending */
2791 	sctp_sack_check(stcb, was_a_gap);
2792 	return (0);
2793 }
2794 
2795 static int
2796 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2797     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2798     int *num_frs,
2799     uint32_t * biggest_newly_acked_tsn,
2800     uint32_t * this_sack_lowest_newack,
2801     int *rto_ok)
2802 {
2803 	struct sctp_tmit_chunk *tp1;
2804 	unsigned int theTSN;
2805 	int j, wake_him = 0, circled = 0;
2806 
2807 	/* Recover the tp1 we last saw */
2808 	tp1 = *p_tp1;
2809 	if (tp1 == NULL) {
2810 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2811 	}
2812 	for (j = frag_strt; j <= frag_end; j++) {
2813 		theTSN = j + last_tsn;
2814 		while (tp1) {
2815 			if (tp1->rec.data.doing_fast_retransmit)
2816 				(*num_frs) += 1;
2817 
2818 			/*-
2819 			 * CMT: CUCv2 algorithm. For each TSN being
2820 			 * processed from the sent queue, track the
2821 			 * next expected pseudo-cumack, or
2822 			 * rtx_pseudo_cumack, if required. Separate
2823 			 * cumack trackers for first transmissions,
2824 			 * and retransmissions.
2825 			 */
2826 			if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2827 			    (tp1->snd_count == 1)) {
2828 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2829 				tp1->whoTo->find_pseudo_cumack = 0;
2830 			}
2831 			if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2832 			    (tp1->snd_count > 1)) {
2833 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2834 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2835 			}
2836 			if (tp1->rec.data.TSN_seq == theTSN) {
2837 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2838 					/*-
2839 					 * must be held until
2840 					 * cum-ack passes
2841 					 */
2842 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2843 						/*-
2844 						 * If it is less than RESEND, it is
2845 						 * now no-longer in flight.
2846 						 * Higher values may already be set
2847 						 * via previous Gap Ack Blocks...
2848 						 * i.e. ACKED or RESEND.
2849 						 */
2850 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2851 						    *biggest_newly_acked_tsn)) {
2852 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2853 						}
2854 						/*-
2855 						 * CMT: SFR algo (and HTNA) - set
2856 						 * saw_newack to 1 for dest being
2857 						 * newly acked. update
2858 						 * this_sack_highest_newack if
2859 						 * appropriate.
2860 						 */
2861 						if (tp1->rec.data.chunk_was_revoked == 0)
2862 							tp1->whoTo->saw_newack = 1;
2863 
2864 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2865 						    tp1->whoTo->this_sack_highest_newack)) {
2866 							tp1->whoTo->this_sack_highest_newack =
2867 							    tp1->rec.data.TSN_seq;
2868 						}
2869 						/*-
2870 						 * CMT DAC algo: also update
2871 						 * this_sack_lowest_newack
2872 						 */
2873 						if (*this_sack_lowest_newack == 0) {
2874 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2875 								sctp_log_sack(*this_sack_lowest_newack,
2876 								    last_tsn,
2877 								    tp1->rec.data.TSN_seq,
2878 								    0,
2879 								    0,
2880 								    SCTP_LOG_TSN_ACKED);
2881 							}
2882 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2883 						}
2884 						/*-
2885 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2886 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2887 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2888 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2889 						 * Separate pseudo_cumack trackers for first transmissions and
2890 						 * retransmissions.
2891 						 */
2892 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2893 							if (tp1->rec.data.chunk_was_revoked == 0) {
2894 								tp1->whoTo->new_pseudo_cumack = 1;
2895 							}
2896 							tp1->whoTo->find_pseudo_cumack = 1;
2897 						}
2898 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2899 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2900 						}
2901 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2902 							if (tp1->rec.data.chunk_was_revoked == 0) {
2903 								tp1->whoTo->new_pseudo_cumack = 1;
2904 							}
2905 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2906 						}
2907 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2908 							sctp_log_sack(*biggest_newly_acked_tsn,
2909 							    last_tsn,
2910 							    tp1->rec.data.TSN_seq,
2911 							    frag_strt,
2912 							    frag_end,
2913 							    SCTP_LOG_TSN_ACKED);
2914 						}
2915 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2916 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2917 							    tp1->whoTo->flight_size,
2918 							    tp1->book_size,
2919 							    (uintptr_t) tp1->whoTo,
2920 							    tp1->rec.data.TSN_seq);
2921 						}
2922 						sctp_flight_size_decrease(tp1);
2923 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2924 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2925 							    tp1);
2926 						}
2927 						sctp_total_flight_decrease(stcb, tp1);
2928 
2929 						tp1->whoTo->net_ack += tp1->send_size;
2930 						if (tp1->snd_count < 2) {
2931 							/*-
2932 							 * True non-retransmitted chunk
2933 							 */
2934 							tp1->whoTo->net_ack2 += tp1->send_size;
2935 
2936 							/*-
2937 							 * update RTO too?
2938 							 */
2939 							if (tp1->do_rtt) {
2940 								if (*rto_ok) {
2941 									tp1->whoTo->RTO =
2942 									    sctp_calculate_rto(stcb,
2943 									    &stcb->asoc,
2944 									    tp1->whoTo,
2945 									    &tp1->sent_rcv_time,
2946 									    sctp_align_safe_nocopy,
2947 									    SCTP_RTT_FROM_DATA);
2948 									*rto_ok = 0;
2949 								}
2950 								if (tp1->whoTo->rto_needed == 0) {
2951 									tp1->whoTo->rto_needed = 1;
2952 								}
2953 								tp1->do_rtt = 0;
2954 							}
2955 						}
2956 					}
2957 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2958 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2959 						    stcb->asoc.this_sack_highest_gap)) {
2960 							stcb->asoc.this_sack_highest_gap =
2961 							    tp1->rec.data.TSN_seq;
2962 						}
2963 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2964 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2965 #ifdef SCTP_AUDITING_ENABLED
2966 							sctp_audit_log(0xB2,
2967 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2968 #endif
2969 						}
2970 					}
2971 					/*-
2972 					 * All chunks NOT UNSENT fall through here and are marked
2973 					 * (leave PR-SCTP ones that are to skip alone though)
2974 					 */
2975 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2976 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2977 						tp1->sent = SCTP_DATAGRAM_MARKED;
2978 					}
2979 					if (tp1->rec.data.chunk_was_revoked) {
2980 						/* deflate the cwnd */
2981 						tp1->whoTo->cwnd -= tp1->book_size;
2982 						tp1->rec.data.chunk_was_revoked = 0;
2983 					}
2984 					/* NR Sack code here */
2985 					if (nr_sacking &&
2986 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2987 						if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2988 							stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2989 #ifdef INVARIANTS
2990 						} else {
2991 							panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2992 #endif
2993 						}
2994 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
2995 						if (tp1->data) {
2996 							/*
2997 							 * sa_ignore
2998 							 * NO_NULL_CHK
2999 							 */
3000 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3001 							sctp_m_freem(tp1->data);
3002 							tp1->data = NULL;
3003 						}
3004 						wake_him++;
3005 					}
3006 				}
3007 				break;
3008 			}	/* if (tp1->TSN_seq == theTSN) */
3009 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
3010 				break;
3011 			}
3012 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3013 			if ((tp1 == NULL) && (circled == 0)) {
3014 				circled++;
3015 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3016 			}
3017 		}		/* end while (tp1) */
3018 		if (tp1 == NULL) {
3019 			circled = 0;
3020 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3021 		}
3022 		/* In case the fragments were not in order we must reset */
3023 	}			/* end for (j = fragStart */
3024 	*p_tp1 = tp1;
3025 	return (wake_him);	/* Return value only used for nr-sack */
3026 }
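
/*
 * A minimal sketch (not from the original source) of the TSN range a gap
 * ack block denotes in the loop above: block offsets are relative to the
 * SACK's cumulative TSN, so theTSN walks last_tsn + frag_strt through
 * last_tsn + frag_end.
 */
#if 0
static void
example_gap_block_range(uint32_t cum_tsn, uint16_t frag_strt, uint16_t frag_end)
{
	uint32_t theTSN;
	int j;

	/* e.g. cum_tsn = 100, block [2, 4] acks TSNs 102, 103 and 104 */
	for (j = frag_strt; j <= frag_end; j++) {
		theTSN = cum_tsn + (uint32_t)j;
		(void)theTSN;	/* each of these TSNs is gap-acked */
	}
}
#endif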
3027 
3028 
3029 static int
3030 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3031     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3032     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3033     int num_seg, int num_nr_seg, int *rto_ok)
3034 {
3035 	struct sctp_gap_ack_block *frag, block;
3036 	struct sctp_tmit_chunk *tp1;
3037 	int i;
3038 	int num_frs = 0;
3039 	int chunk_freed;
3040 	int non_revocable;
3041 	uint16_t frag_strt, frag_end, prev_frag_end;
3042 
3043 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3044 	prev_frag_end = 0;
3045 	chunk_freed = 0;
3046 
3047 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3048 		if (i == num_seg) {
3049 			prev_frag_end = 0;
3050 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3051 		}
3052 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3053 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3054 		*offset += sizeof(block);
3055 		if (frag == NULL) {
3056 			return (chunk_freed);
3057 		}
3058 		frag_strt = ntohs(frag->start);
3059 		frag_end = ntohs(frag->end);
3060 
3061 		if (frag_strt > frag_end) {
3062 			/* This gap report is malformed, skip it. */
3063 			continue;
3064 		}
3065 		if (frag_strt <= prev_frag_end) {
3066 			/* This gap report is not in order, so restart. */
3067 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3068 		}
3069 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3070 			*biggest_tsn_acked = last_tsn + frag_end;
3071 		}
3072 		if (i < num_seg) {
3073 			non_revocable = 0;
3074 		} else {
3075 			non_revocable = 1;
3076 		}
3077 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3078 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3079 		    this_sack_lowest_newack, rto_ok)) {
3080 			chunk_freed = 1;
3081 		}
3082 		prev_frag_end = frag_end;
3083 	}
3084 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3085 		if (num_frs)
3086 			sctp_log_fr(*biggest_tsn_acked,
3087 			    *biggest_newly_acked_tsn,
3088 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3089 	}
3090 	return (chunk_freed);
3091 }
3092 
3093 static void
3094 sctp_check_for_revoked(struct sctp_tcb *stcb,
3095     struct sctp_association *asoc, uint32_t cumack,
3096     uint32_t biggest_tsn_acked)
3097 {
3098 	struct sctp_tmit_chunk *tp1;
3099 
3100 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3101 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3102 			/*
3103 			 * ok, this guy is either ACKED or MARKED. If it is
3104 			 * ACKED it has been previously acked but not this
3105 			 * time, i.e. it has been revoked. If it is MARKED it
3106 			 * was ACK'ed again.
3107 			 */
3108 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3109 				break;
3110 			}
3111 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3112 				/* it has been revoked */
3113 				tp1->sent = SCTP_DATAGRAM_SENT;
3114 				tp1->rec.data.chunk_was_revoked = 1;
3115 				/*
3116 				 * We must add this stuff back in to assure
3117 				 * timers and such get started.
3118 				 */
3119 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3120 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3121 					    tp1->whoTo->flight_size,
3122 					    tp1->book_size,
3123 					    (uintptr_t) tp1->whoTo,
3124 					    tp1->rec.data.TSN_seq);
3125 				}
3126 				sctp_flight_size_increase(tp1);
3127 				sctp_total_flight_increase(stcb, tp1);
3128 				/*
3129 				 * We inflate the cwnd to compensate for our
3130 				 * artificial inflation of the flight_size.
3131 				 */
3132 				tp1->whoTo->cwnd += tp1->book_size;
3133 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3134 					sctp_log_sack(asoc->last_acked_seq,
3135 					    cumack,
3136 					    tp1->rec.data.TSN_seq,
3137 					    0,
3138 					    0,
3139 					    SCTP_LOG_TSN_REVOKED);
3140 				}
3141 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3142 				/* it has been re-acked in this SACK */
3143 				tp1->sent = SCTP_DATAGRAM_ACKED;
3144 			}
3145 		}
3146 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3147 			break;
3148 	}
3149 }
3150 
3151 
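/*
 * Strike overview: while a chunk's sent state is below
 * SCTP_DATAGRAM_RESEND it effectively acts as a miss counter; every SACK
 * that gap-acks data beyond the chunk increments it, and once it reaches
 * SCTP_DATAGRAM_RESEND the chunk is marked for fast retransmit.  The
 * SFR/DAC/HTNA checks below restrict which chunks may be struck when CMT
 * is enabled.
 */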
3152 static void
3153 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3154     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3155 {
3156 	struct sctp_tmit_chunk *tp1;
3157 	int strike_flag = 0;
3158 	struct timeval now;
3159 	int tot_retrans = 0;
3160 	uint32_t sending_seq;
3161 	struct sctp_nets *net;
3162 	int num_dests_sacked = 0;
3163 
3164 	/*
3165 	 * Select the sending_seq: this is either the next thing ready to be
3166 	 * sent but not yet transmitted, or else the next seq we will assign.
3167 	 */
3168 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3169 	if (tp1 == NULL) {
3170 		sending_seq = asoc->sending_seq;
3171 	} else {
3172 		sending_seq = tp1->rec.data.TSN_seq;
3173 	}
3174 
3175 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3176 	if ((asoc->sctp_cmt_on_off > 0) &&
3177 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3178 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3179 			if (net->saw_newack)
3180 				num_dests_sacked++;
3181 		}
3182 	}
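	/*
	 * num_dests_sacked counts destinations that saw new acks in this
	 * SACK; DAC-based extra marking below is only applied when exactly
	 * one destination was sacked, i.e. the SACK is not "mixed".
	 */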
3183 	if (stcb->asoc.peer_supports_prsctp) {
3184 		(void)SCTP_GETTIME_TIMEVAL(&now);
3185 	}
3186 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3187 		strike_flag = 0;
3188 		if (tp1->no_fr_allowed) {
3189 			/* this one had a timeout or something */
3190 			continue;
3191 		}
3192 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3193 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3194 				sctp_log_fr(biggest_tsn_newly_acked,
3195 				    tp1->rec.data.TSN_seq,
3196 				    tp1->sent,
3197 				    SCTP_FR_LOG_CHECK_STRIKE);
3198 		}
3199 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3200 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3201 			/* done */
3202 			break;
3203 		}
3204 		if (stcb->asoc.peer_supports_prsctp) {
3205 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3206 				/* Is it expired? */
3207 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3208 					/* Yes so drop it */
3209 					if (tp1->data != NULL) {
3210 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3211 						    SCTP_SO_NOT_LOCKED);
3212 					}
3213 					continue;
3214 				}
3215 			}
3216 		}
3217 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3218 			/* we are beyond the tsn in the sack  */
3219 			break;
3220 		}
3221 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3222 			/* either a RESEND, ACKED, or MARKED */
3223 			/* skip */
3224 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3225 				/* Continue striking FWD-TSN chunks */
3226 				tp1->rec.data.fwd_tsn_cnt++;
3227 			}
3228 			continue;
3229 		}
3230 		/*
3231 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3232 		 */
3233 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3234 			/*
3235 			 * No new acks were received for data sent to this
3236 			 * dest. Therefore, according to the SFR algo for
3237 			 * CMT, no data sent to this dest can be marked for
3238 			 * FR using this SACK.
3239 			 */
3240 			continue;
3241 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3242 		    tp1->whoTo->this_sack_highest_newack)) {
3243 			/*
3244 			 * CMT: New acks were received for data sent to
3245 			 * this dest. But no new acks were seen for data
3246 			 * sent after tp1. Therefore, according to the SFR
3247 			 * algo for CMT, tp1 cannot be marked for FR using
3248 			 * this SACK. This step covers part of the DAC algo
3249 			 * and the HTNA algo as well.
3250 			 */
3251 			continue;
3252 		}
3253 		/*
3254 		 * Here we check to see if we have already done a FR and if
3255 		 * so whether the biggest TSN we saw in the sack is smaller
3256 		 * than the recovery point. If so we don't strike the TSN...
3257 		 * otherwise we CAN strike the TSN.
3258 		 */
3259 		/*
3260 		 * @@@ JRI: Check for CMT if (accum_moved &&
3261 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3262 		 * 0)) {
3263 		 */
3264 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3265 			/*
3266 			 * Strike the TSN if in fast-recovery and cum-ack
3267 			 * moved.
3268 			 */
3269 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3270 				sctp_log_fr(biggest_tsn_newly_acked,
3271 				    tp1->rec.data.TSN_seq,
3272 				    tp1->sent,
3273 				    SCTP_FR_LOG_STRIKE_CHUNK);
3274 			}
3275 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3276 				tp1->sent++;
3277 			}
3278 			if ((asoc->sctp_cmt_on_off > 0) &&
3279 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3280 				/*
3281 				 * CMT DAC algorithm: If the SACK DAC flag is
3282 				 * set to 0, the lowest_newack test will not
3283 				 * pass because it would have been set to the
3284 				 * cumack earlier. If the chunk is not already
3285 				 * marked for rtx, the SACK is not a mixed
3286 				 * SACK, and tp1 is not between two sacked
3287 				 * TSNs, then mark it one more time. NOTE that
3288 				 * we mark one additional time since the SACK
3289 				 * DAC flag indicates that two packets have
3290 				 * been received after this missing TSN.
3291 				 */
3292 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3293 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3294 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3295 						sctp_log_fr(16 + num_dests_sacked,
3296 						    tp1->rec.data.TSN_seq,
3297 						    tp1->sent,
3298 						    SCTP_FR_LOG_STRIKE_CHUNK);
3299 					}
3300 					tp1->sent++;
3301 				}
3302 			}
3303 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3304 		    (asoc->sctp_cmt_on_off == 0)) {
3305 			/*
3306 			 * For those that have done a FR we must take
3307 			 * special consideration if we strike, i.e. the
3308 			 * biggest_newly_acked must be higher than the
3309 			 * sending_seq at the time we did the FR.
3310 			 */
3311 			if (
3312 #ifdef SCTP_FR_TO_ALTERNATE
3313 			/*
3314 			 * If FR's go to new networks, then we must only do
3315 			 * this for singly homed asoc's. However if the FR's
3316 			 * go to the same network (Armando's work) then its
3317 			 * go to the same network (Armando's work) then it's
3318 			 */
3319 			    (asoc->numnets < 2)
3320 #else
3321 			    (1)
3322 #endif
3323 			    ) {
3324 
3325 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3326 				    tp1->rec.data.fast_retran_tsn)) {
3327 					/*
3328 					 * Strike the TSN, since this ack is
3329 					 * beyond where things were when we
3330 					 * did a FR.
3331 					 */
3332 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3333 						sctp_log_fr(biggest_tsn_newly_acked,
3334 						    tp1->rec.data.TSN_seq,
3335 						    tp1->sent,
3336 						    SCTP_FR_LOG_STRIKE_CHUNK);
3337 					}
3338 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3339 						tp1->sent++;
3340 					}
3341 					strike_flag = 1;
3342 					if ((asoc->sctp_cmt_on_off > 0) &&
3343 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3344 						/*
3345 						 * CMT DAC algorithm: If
3346 						 * the SACK DAC flag is
3347 						 * set to 0, the
3348 						 * lowest_newack test will
3349 						 * not pass because it
3350 						 * would have been set to
3351 						 * the cumack earlier. If
3352 						 * the chunk is not already
3353 						 * marked for rtx, the SACK
3354 						 * is not a mixed SACK, and
3355 						 * tp1 is not between two
3356 						 * sacked TSNs, then mark
3357 						 * it one more time. NOTE
3358 						 * we mark one additional
3359 						 * time since the DAC flag
3360 						 * indicates two packets
3361 						 * arrived after this TSN.
3362 						 */
3363 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3364 						    (num_dests_sacked == 1) &&
3365 						    SCTP_TSN_GT(this_sack_lowest_newack,
3366 						    tp1->rec.data.TSN_seq)) {
3367 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3368 								sctp_log_fr(32 + num_dests_sacked,
3369 								    tp1->rec.data.TSN_seq,
3370 								    tp1->sent,
3371 								    SCTP_FR_LOG_STRIKE_CHUNK);
3372 							}
3373 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3374 								tp1->sent++;
3375 							}
3376 						}
3377 					}
3378 				}
3379 			}
3380 			/*
3381 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3382 			 * algo covers HTNA.
3383 			 */
3384 		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3385 		    biggest_tsn_newly_acked)) {
3386 			/*
3387 			 * We don't strike these: this is the HTNA
3388 			 * algorithm, i.e. we don't strike if our TSN is
3389 			 * larger than the Highest TSN Newly Acked.
3390 			 */
3391 			;
3392 		} else {
3393 			/* Strike the TSN */
3394 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3395 				sctp_log_fr(biggest_tsn_newly_acked,
3396 				    tp1->rec.data.TSN_seq,
3397 				    tp1->sent,
3398 				    SCTP_FR_LOG_STRIKE_CHUNK);
3399 			}
3400 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3401 				tp1->sent++;
3402 			}
3403 			if ((asoc->sctp_cmt_on_off > 0) &&
3404 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3405 				/*
3406 				 * CMT DAC algorithm: If the SACK DAC flag is
3407 				 * set to 0, the lowest_newack test will not
3408 				 * pass because it would have been set to the
3409 				 * cumack earlier. If the chunk is not already
3410 				 * marked for rtx, the SACK is not a mixed
3411 				 * SACK, and tp1 is not between two sacked
3412 				 * TSNs, then mark it one more time. NOTE that
3413 				 * we mark one additional time since the SACK
3414 				 * DAC flag indicates that two packets have
3415 				 * been received after this missing TSN.
3416 				 */
3417 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3418 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3419 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3420 						sctp_log_fr(48 + num_dests_sacked,
3421 						    tp1->rec.data.TSN_seq,
3422 						    tp1->sent,
3423 						    SCTP_FR_LOG_STRIKE_CHUNK);
3424 					}
3425 					tp1->sent++;
3426 				}
3427 			}
3428 		}
3429 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3430 			struct sctp_nets *alt;
3431 
3432 			/* fix counts and things */
3433 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3434 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3435 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3436 				    tp1->book_size,
3437 				    (uintptr_t) tp1->whoTo,
3438 				    tp1->rec.data.TSN_seq);
3439 			}
3440 			if (tp1->whoTo) {
3441 				tp1->whoTo->net_ack++;
3442 				sctp_flight_size_decrease(tp1);
3443 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3444 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3445 					    tp1);
3446 				}
3447 			}
3448 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3449 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3450 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3451 			}
3452 			/* add back to the rwnd */
3453 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3454 
3455 			/* remove from the total flight */
3456 			sctp_total_flight_decrease(stcb, tp1);
3457 
3458 			if ((stcb->asoc.peer_supports_prsctp) &&
3459 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3460 				/*
3461 				 * Has it been retransmitted tv_sec times? -
3462 				 * we store the retran count there.
3463 				 */
3464 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3465 					/* Yes, so drop it */
3466 					if (tp1->data != NULL) {
3467 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3468 						    SCTP_SO_NOT_LOCKED);
3469 					}
3470 					/* Make sure to flag we had a FR */
3471 					tp1->whoTo->net_ack++;
3472 					continue;
3473 				}
3474 			}
3475 			/*
3476 			 * SCTP_PRINTF("OK, we are now ready to FR this
3477 			 * guy\n");
3478 			 */
3479 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3480 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3481 				    0, SCTP_FR_MARKED);
3482 			}
3483 			if (strike_flag) {
3484 				/* This is a subsequent FR */
3485 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3486 			}
3487 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3488 			if (asoc->sctp_cmt_on_off > 0) {
3489 				/*
3490 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3491 				 * If CMT is being used, then pick dest with
3492 				 * largest ssthresh for any retransmission.
3493 				 */
3494 				tp1->no_fr_allowed = 1;
3495 				alt = tp1->whoTo;
3496 				/* sa_ignore NO_NULL_CHK */
3497 				if (asoc->sctp_cmt_pf > 0) {
3498 					/*
3499 					 * JRS 5/18/07 - If CMT PF is on,
3500 					 * use the PF version of
3501 					 * find_alt_net()
3502 					 */
3503 					alt = sctp_find_alternate_net(stcb, alt, 2);
3504 				} else {
3505 					/*
3506 					 * JRS 5/18/07 - If only CMT is on,
3507 					 * use the CMT version of
3508 					 * find_alt_net()
3509 					 */
3510 					/* sa_ignore NO_NULL_CHK */
3511 					alt = sctp_find_alternate_net(stcb, alt, 1);
3512 				}
3513 				if (alt == NULL) {
3514 					alt = tp1->whoTo;
3515 				}
3516 				/*
3517 				 * CUCv2: If a different dest is picked for
3518 				 * the retransmission, then new
3519 				 * (rtx-)pseudo_cumack needs to be tracked
3520 				 * for orig dest. Let CUCv2 track new (rtx-)
3521 				 * pseudo-cumack always.
3522 				 */
3523 				if (tp1->whoTo) {
3524 					tp1->whoTo->find_pseudo_cumack = 1;
3525 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3526 				}
3527 			} else {/* CMT is OFF */
3528 
3529 #ifdef SCTP_FR_TO_ALTERNATE
3530 				/* Can we find an alternate? */
3531 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3532 #else
3533 				/*
3534 				 * default behavior is to NOT retransmit
3535 				 * FR's to an alternate. Armando Caro's
3536 				 * paper details why.
3537 				 */
3538 				alt = tp1->whoTo;
3539 #endif
3540 			}
3541 
3542 			tp1->rec.data.doing_fast_retransmit = 1;
3543 			tot_retrans++;
3544 			/* mark the sending seq for possible subsequent FR's */
3545 			/*
3546 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3547 			 * (uint32_t)tp1->rec.data.TSN_seq);
3548 			 */
3549 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3550 				/*
3551 				 * If the send queue is empty then sending_seq
3552 				 * is the next sequence number that will be
3553 				 * assigned, so we subtract one from this to
3554 				 * get the one we last sent.
3555 				 */
3556 				tp1->rec.data.fast_retran_tsn = sending_seq;
3557 			} else {
3558 				/*
3559 				 * If there are chunks on the send queue
3560 				 * (unsent data that has made it from the
3561 				 * stream queues but not out the door), we
3562 				 * take the first one (which will have the
3563 				 * lowest TSN) and subtract one to get the
3564 				 * one we last sent.
3565 				 */
3566 				struct sctp_tmit_chunk *ttt;
3567 
3568 				ttt = TAILQ_FIRST(&asoc->send_queue);
3569 				tp1->rec.data.fast_retran_tsn =
3570 				    ttt->rec.data.TSN_seq;
3571 			}
3572 
3573 			if (tp1->do_rtt) {
3574 				/*
3575 				 * this guy had an RTO calculation pending on
3576 				 * it, cancel it
3577 				 */
3578 				if ((tp1->whoTo != NULL) &&
3579 				    (tp1->whoTo->rto_needed == 0)) {
3580 					tp1->whoTo->rto_needed = 1;
3581 				}
3582 				tp1->do_rtt = 0;
3583 			}
3584 			if (alt != tp1->whoTo) {
3585 				/* yes, there is an alternate. */
3586 				sctp_free_remote_addr(tp1->whoTo);
3587 				/* sa_ignore FREED_MEMORY */
3588 				tp1->whoTo = alt;
3589 				atomic_add_int(&alt->ref_count, 1);
3590 			}
3591 		}
3592 	}
3593 }
3594 
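/*
 * PR-SCTP (RFC 3758) background: abandoned chunks are never going to be
 * retransmitted, so the sender maintains an "advanced peer ack point"
 * that may run ahead of the real cumulative ack.  This routine slides
 * that point forward over chunks marked to be skipped and returns the
 * last such chunk, letting the caller decide whether a FORWARD-TSN chunk
 * needs to be sent to move the receiver past the abandoned TSNs.
 */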
3595 struct sctp_tmit_chunk *
3596 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3597     struct sctp_association *asoc)
3598 {
3599 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3600 	struct timeval now;
3601 	int now_filled = 0;
3602 
3603 	if (asoc->peer_supports_prsctp == 0) {
3604 		return (NULL);
3605 	}
3606 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3607 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3608 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3609 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3610 			/* no chance to advance, out of here */
3611 			break;
3612 		}
3613 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3614 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3615 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3616 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3617 				    asoc->advanced_peer_ack_point,
3618 				    tp1->rec.data.TSN_seq, 0, 0);
3619 			}
3620 		}
3621 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3622 			/*
3623 			 * We can't fwd-tsn past any that are reliable aka
3624 			 * retransmitted until the asoc fails.
3625 			 */
3626 			break;
3627 		}
3628 		if (!now_filled) {
3629 			(void)SCTP_GETTIME_TIMEVAL(&now);
3630 			now_filled = 1;
3631 		}
3632 		/*
3633 		 * Now we have a chunk which is marked for another
3634 		 * retransmission to a PR-stream but has already run out of
3635 		 * its chances, OR has been marked to skip now. Can we skip
3636 		 * it if it's a resend?
3637 		 */
3638 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3639 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3640 			/*
3641 			 * Now is this one marked for resend and its time is
3642 			 * now up?
3643 			 */
3644 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3645 				/* Yes so drop it */
3646 				if (tp1->data) {
3647 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3648 					    1, SCTP_SO_NOT_LOCKED);
3649 				}
3650 			} else {
3651 				/*
3652 				 * No, we are done when we hit one marked for
3653 				 * resend whose time has not expired.
3654 				 */
3655 				break;
3656 			}
3657 		}
3658 		/*
3659 		 * Ok, now if this chunk is marked to be dropped we can clean
3660 		 * it up, advance our peer ack point, and check the next
3661 		 * chunk.
3662 		 */
3663 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3664 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3665 			/* advance PeerAckPoint goes forward */
3666 			/* the advanced PeerAckPoint goes forward */
3667 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3668 				a_adv = tp1;
3669 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3670 				/* No update but we do save the chk */
3671 				a_adv = tp1;
3672 			}
3673 		} else {
3674 			/*
3675 			 * If it is still in RESEND we can advance no
3676 			 * further
3677 			 */
3678 			break;
3679 		}
3680 	}
3681 	return (a_adv);
3682 }
3683 
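/*
 * Flight-size audit: classify every chunk on the sent queue by its sent
 * state.  If anything still looks in flight (or sits between RESEND and
 * ACKED) when the caller believes the flight should be empty, the
 * aggregate counters have drifted: panic under INVARIANTS, otherwise log
 * the mismatch and report it so the caller can rebuild the counters.
 */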
3684 static int
3685 sctp_fs_audit(struct sctp_association *asoc)
3686 {
3687 	struct sctp_tmit_chunk *chk;
3688 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3689 	int entry_flight, entry_cnt, ret;
3690 
3691 	entry_flight = asoc->total_flight;
3692 	entry_cnt = asoc->total_flight_count;
3693 	ret = 0;
3694 
3695 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3696 		return (0);
3697 
3698 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3699 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3700 			SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3701 			    chk->rec.data.TSN_seq,
3702 			    chk->send_size,
3703 			    chk->snd_count);
3704 			inflight++;
3705 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3706 			resend++;
3707 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3708 			inbetween++;
3709 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3710 			above++;
3711 		} else {
3712 			acked++;
3713 		}
3714 	}
3715 
3716 	if ((inflight > 0) || (inbetween > 0)) {
3717 #ifdef INVARIANTS
3718 		panic("Flight size-express incorrect? \n");
3719 #else
3720 		SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3721 		    entry_flight, entry_cnt);
3722 
3723 		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3724 		    inflight, inbetween, resend, above, acked);
3725 		ret = 1;
3726 #endif
3727 	}
3728 	return (ret);
3729 }
3730 
3731 
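/*
 * Window probe recovery: when the peer advertised a zero window we still
 * send one chunk as a probe.  Once the window reopens, the probe chunk
 * (unless already acked or abandoned) is taken out of the flight and
 * marked SCTP_DATAGRAM_RESEND so it is sent again via the normal
 * retransmission path.
 */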
3732 static void
3733 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3734     struct sctp_association *asoc,
3735     struct sctp_tmit_chunk *tp1)
3736 {
3737 	tp1->window_probe = 0;
3738 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3739 		/* TSNs skipped; we do NOT move back. */
3740 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3741 		    tp1->whoTo->flight_size,
3742 		    tp1->book_size,
3743 		    (uintptr_t) tp1->whoTo,
3744 		    tp1->rec.data.TSN_seq);
3745 		return;
3746 	}
3747 	/* First set this up by shrinking the flight size */
3748 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3749 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3750 		    tp1);
3751 	}
3752 	sctp_flight_size_decrease(tp1);
3753 	sctp_total_flight_decrease(stcb, tp1);
3754 	/* Now mark for resend */
3755 	tp1->sent = SCTP_DATAGRAM_RESEND;
3756 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3757 
3758 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3759 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3760 		    tp1->whoTo->flight_size,
3761 		    tp1->book_size,
3762 		    (uintptr_t) tp1->whoTo,
3763 		    tp1->rec.data.TSN_seq);
3764 	}
3765 }
3766 
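/*
 * Express SACK path: a fast path for SACKs that only advance the
 * cumulative ack and/or update the window, with no gap ack blocks to
 * process.  Everything newly acked is freed in a single pass and none of
 * the gap/strike machinery has to run.
 */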
3767 void
3768 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3769     uint32_t rwnd, int *abort_now, int ecne_seen)
3770 {
3771 	struct sctp_nets *net;
3772 	struct sctp_association *asoc;
3773 	struct sctp_tmit_chunk *tp1, *tp2;
3774 	uint32_t old_rwnd;
3775 	int win_probe_recovery = 0;
3776 	int win_probe_recovered = 0;
3777 	int j, done_once = 0;
3778 	int rto_ok = 1;
3779 
3780 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3781 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3782 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3783 	}
3784 	SCTP_TCB_LOCK_ASSERT(stcb);
3785 #ifdef SCTP_ASOCLOG_OF_TSNS
3786 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3787 	stcb->asoc.cumack_log_at++;
3788 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3789 		stcb->asoc.cumack_log_at = 0;
3790 	}
3791 #endif
3792 	asoc = &stcb->asoc;
3793 	old_rwnd = asoc->peers_rwnd;
3794 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3795 		/* old ack */
3796 		return;
3797 	} else if (asoc->last_acked_seq == cumack) {
3798 		/* Window update sack */
3799 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3800 		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3801 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3802 			/* SWS sender side engages */
3803 			asoc->peers_rwnd = 0;
3804 		}
3805 		if (asoc->peers_rwnd > old_rwnd) {
3806 			goto again;
3807 		}
3808 		return;
3809 	}
3810 	/* First setup for CC stuff */
3811 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3812 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3813 			/* Drag along the window_tsn for cwr's */
3814 			net->cwr_window_tsn = cumack;
3815 		}
3816 		net->prev_cwnd = net->cwnd;
3817 		net->net_ack = 0;
3818 		net->net_ack2 = 0;
3819 
3820 		/*
3821 		 * CMT: Reset CUC and Fast recovery algo variables before
3822 		 * SACK processing
3823 		 */
3824 		net->new_pseudo_cumack = 0;
3825 		net->will_exit_fast_recovery = 0;
3826 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3827 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3828 		}
3829 	}
3830 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3831 		uint32_t send_s;
3832 
3833 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3834 			tp1 = TAILQ_LAST(&asoc->sent_queue,
3835 			    sctpchunk_listhead);
3836 			send_s = tp1->rec.data.TSN_seq + 1;
3837 		} else {
3838 			send_s = asoc->sending_seq;
3839 		}
3840 		if (SCTP_TSN_GE(cumack, send_s)) {
3841 #ifndef INVARIANTS
3842 			struct mbuf *oper;
3843 
3844 #endif
3845 #ifdef INVARIANTS
3846 			panic("Impossible sack 1");
3847 #else
3848 
3849 			*abort_now = 1;
3850 			/* XXX */
3851 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3852 			    0, M_NOWAIT, 1, MT_DATA);
3853 			if (oper) {
3854 				struct sctp_paramhdr *ph;
3855 				uint32_t *ippp;
3856 
3857 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3858 				    sizeof(uint32_t);
3859 				ph = mtod(oper, struct sctp_paramhdr *);
3860 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3861 				ph->param_length = htons(SCTP_BUF_LEN(oper));
3862 				ippp = (uint32_t *) (ph + 1);
3863 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3864 			}
3865 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3866 			sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
3867 			return;
3868 #endif
3869 		}
3870 	}
3871 	asoc->this_sack_highest_gap = cumack;
3872 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3873 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3874 		    stcb->asoc.overall_error_count,
3875 		    0,
3876 		    SCTP_FROM_SCTP_INDATA,
3877 		    __LINE__);
3878 	}
3879 	stcb->asoc.overall_error_count = 0;
3880 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3881 		/* process the new consecutive TSN first */
3882 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3883 			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3884 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3885 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3886 				}
3887 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3888 					/*
3889 					 * If it is less than ACKED, it is
3890 					 * now no longer in flight. Higher
3891 					 * values may occur during marking
3892 					 */
3893 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3894 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3895 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3896 							    tp1->whoTo->flight_size,
3897 							    tp1->book_size,
3898 							    (uintptr_t) tp1->whoTo,
3899 							    tp1->rec.data.TSN_seq);
3900 						}
3901 						sctp_flight_size_decrease(tp1);
3902 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3903 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3904 							    tp1);
3905 						}
3906 						/* sa_ignore NO_NULL_CHK */
3907 						sctp_total_flight_decrease(stcb, tp1);
3908 					}
3909 					tp1->whoTo->net_ack += tp1->send_size;
3910 					if (tp1->snd_count < 2) {
3911 						/*
3912 						 * True non-retransmitted
3913 						 * chunk
3914 						 */
3915 						tp1->whoTo->net_ack2 +=
3916 						    tp1->send_size;
3917 
3918 						/* update RTO too? */
3919 						if (tp1->do_rtt) {
3920 							if (rto_ok) {
3921 								tp1->whoTo->RTO =
3922 								/* sa_ignore NO_NULL_CHK */
3927 								    sctp_calculate_rto(stcb,
3928 								    asoc, tp1->whoTo,
3929 								    &tp1->sent_rcv_time,
3930 								    sctp_align_safe_nocopy,
3931 								    SCTP_RTT_FROM_DATA);
3932 								rto_ok = 0;
3933 							}
3934 							if (tp1->whoTo->rto_needed == 0) {
3935 								tp1->whoTo->rto_needed = 1;
3936 							}
3937 							tp1->do_rtt = 0;
3938 						}
3939 					}
3940 					/*
3941 					 * CMT: CUCv2 algorithm. From the
3942 					 * cumack'd TSNs, for each TSN being
3943 					 * acked for the first time, set the
3944 					 * following variables for the
3945 					 * corresp destination.
3946 					 * new_pseudo_cumack will trigger a
3947 					 * cwnd update.
3948 					 * find_(rtx_)pseudo_cumack will
3949 					 * trigger search for the next
3950 					 * expected (rtx-)pseudo-cumack.
3951 					 */
3952 					tp1->whoTo->new_pseudo_cumack = 1;
3953 					tp1->whoTo->find_pseudo_cumack = 1;
3954 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3955 
3956 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3957 						/* sa_ignore NO_NULL_CHK */
3958 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3959 					}
3960 				}
3961 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3962 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3963 				}
3964 				if (tp1->rec.data.chunk_was_revoked) {
3965 					/* deflate the cwnd */
3966 					tp1->whoTo->cwnd -= tp1->book_size;
3967 					tp1->rec.data.chunk_was_revoked = 0;
3968 				}
3969 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3970 					if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3971 						asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3972 #ifdef INVARIANTS
3973 					} else {
3974 						panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
3975 #endif
3976 					}
3977 				}
3978 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3979 				if (tp1->data) {
3980 					/* sa_ignore NO_NULL_CHK */
3981 					sctp_free_bufspace(stcb, asoc, tp1, 1);
3982 					sctp_m_freem(tp1->data);
3983 					tp1->data = NULL;
3984 				}
3985 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3986 					sctp_log_sack(asoc->last_acked_seq,
3987 					    cumack,
3988 					    tp1->rec.data.TSN_seq,
3989 					    0,
3990 					    0,
3991 					    SCTP_LOG_FREE_SENT);
3992 				}
3993 				asoc->sent_queue_cnt--;
3994 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3995 			} else {
3996 				break;
3997 			}
3998 		}
3999 
4000 	}
4001 	/* sa_ignore NO_NULL_CHK */
4002 	if (stcb->sctp_socket) {
4003 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4004 		struct socket *so;
4005 
4006 #endif
4007 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4008 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4009 			/* sa_ignore NO_NULL_CHK */
4010 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4011 		}
4012 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4013 		so = SCTP_INP_SO(stcb->sctp_ep);
4014 		atomic_add_int(&stcb->asoc.refcnt, 1);
4015 		SCTP_TCB_UNLOCK(stcb);
4016 		SCTP_SOCKET_LOCK(so, 1);
4017 		SCTP_TCB_LOCK(stcb);
4018 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4019 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4020 			/* assoc was freed while we were unlocked */
4021 			SCTP_SOCKET_UNLOCK(so, 1);
4022 			return;
4023 		}
4024 #endif
4025 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4026 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4027 		SCTP_SOCKET_UNLOCK(so, 1);
4028 #endif
4029 	} else {
4030 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4031 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4032 		}
4033 	}
4034 
4035 	/* JRS - Use the congestion control given in the CC module */
4036 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4037 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4038 			if (net->net_ack2 > 0) {
4039 				/*
4040 				 * Karn's rule applies to clearing error
4041 				 * count, this is optional.
4042 				 */
4043 				net->error_count = 0;
4044 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4045 					/* addr came good */
4046 					net->dest_state |= SCTP_ADDR_REACHABLE;
4047 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4048 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4049 				}
4050 				if (net == stcb->asoc.primary_destination) {
4051 					if (stcb->asoc.alternate) {
4052 						/*
4053 						 * release the alternate,
4054 						 * primary is good
4055 						 */
4056 						sctp_free_remote_addr(stcb->asoc.alternate);
4057 						stcb->asoc.alternate = NULL;
4058 					}
4059 				}
4060 				if (net->dest_state & SCTP_ADDR_PF) {
4061 					net->dest_state &= ~SCTP_ADDR_PF;
4062 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4063 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4064 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4065 					/* Done with this net */
4066 					net->net_ack = 0;
4067 				}
4068 				/* restore any doubled timers */
4069 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4070 				if (net->RTO < stcb->asoc.minrto) {
4071 					net->RTO = stcb->asoc.minrto;
4072 				}
4073 				if (net->RTO > stcb->asoc.maxrto) {
4074 					net->RTO = stcb->asoc.maxrto;
4075 				}
4076 			}
4077 		}
4078 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4079 	}
4080 	asoc->last_acked_seq = cumack;
4081 
4082 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4083 		/* nothing left in-flight */
4084 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4085 			net->flight_size = 0;
4086 			net->partial_bytes_acked = 0;
4087 		}
4088 		asoc->total_flight = 0;
4089 		asoc->total_flight_count = 0;
4090 	}
4091 	/* RWND update */
4092 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4093 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4094 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4095 		/* SWS sender side engages */
4096 		asoc->peers_rwnd = 0;
4097 	}
4098 	if (asoc->peers_rwnd > old_rwnd) {
4099 		win_probe_recovery = 1;
4100 	}
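	/*
	 * Worked example (illustrative figures only): with a reported rwnd
	 * of 10000 bytes, 8000 bytes in flight over 2 chunks and a per-chunk
	 * overhead sysctl of 256, peers_rwnd becomes
	 * 10000 - (8000 + 2 * 256) = 1488.  Dropping below the SWS threshold
	 * clamps the usable window to 0 to avoid silly window syndrome.
	 */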
4101 	/* Now assure a timer where data is queued at */
4102 again:
4103 	j = 0;
4104 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4105 		int to_ticks;
4106 
4107 		if (win_probe_recovery && (net->window_probe)) {
4108 			win_probe_recovered = 1;
4109 			/*
4110 			 * Find first chunk that was used with window probe
4111 			 * and clear the sent
4112 			 * and clear its sent state
4113 			/* sa_ignore FREED_MEMORY */
4114 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4115 				if (tp1->window_probe) {
4116 					/* move back to data send queue */
4117 					sctp_window_probe_recovery(stcb, asoc, tp1);
4118 					break;
4119 				}
4120 			}
4121 		}
4122 		if (net->RTO == 0) {
4123 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4124 		} else {
4125 			to_ticks = MSEC_TO_TICKS(net->RTO);
4126 		}
4127 		if (net->flight_size) {
4128 			j++;
4129 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4130 			    sctp_timeout_handler, &net->rxt_timer);
4131 			if (net->window_probe) {
4132 				net->window_probe = 0;
4133 			}
4134 		} else {
4135 			if (net->window_probe) {
4136 				/*
4137 				 * In window probes we must assure a timer
4138 				 * is still running there
4139 				 */
4140 				net->window_probe = 0;
4141 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4142 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4143 					    sctp_timeout_handler, &net->rxt_timer);
4144 				}
4145 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4146 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4147 				    stcb, net,
4148 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4149 			}
4150 		}
4151 	}
4152 	if ((j == 0) &&
4153 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4154 	    (asoc->sent_queue_retran_cnt == 0) &&
4155 	    (win_probe_recovered == 0) &&
4156 	    (done_once == 0)) {
4157 		/*
4158 		 * huh, this should not happen unless all packets are
4159 		 * PR-SCTP and marked to skip of course.
4160 		 */
4161 		if (sctp_fs_audit(asoc)) {
4162 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4163 				net->flight_size = 0;
4164 			}
4165 			asoc->total_flight = 0;
4166 			asoc->total_flight_count = 0;
4167 			asoc->sent_queue_retran_cnt = 0;
4168 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4169 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4170 					sctp_flight_size_increase(tp1);
4171 					sctp_total_flight_increase(stcb, tp1);
4172 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4173 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4174 				}
4175 			}
4176 		}
4177 		done_once = 1;
4178 		goto again;
4179 	}
4180 	/**********************************/
4181 	/* Now what about shutdown issues */
4182 	/**********************************/
4183 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4184 		/* nothing left on sendqueue... consider done */
4185 		/* clean up */
4186 		if ((asoc->stream_queue_cnt == 1) &&
4187 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4188 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4189 		    (asoc->locked_on_sending)
4190 		    ) {
4191 			struct sctp_stream_queue_pending *sp;
4192 
4193 			/*
4194 			 * I may be in a state where we got all across... but
4195 			 * cannot write more due to a shutdown... we abort
4196 			 * since the user did not indicate EOR in this case.
4197 			 * The sp will be cleaned during free of the asoc.
4198 			 */
4199 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4200 			    sctp_streamhead);
4201 			if ((sp) && (sp->length == 0)) {
4202 				/* Let cleanup code purge it */
4203 				if (sp->msg_is_complete) {
4204 					asoc->stream_queue_cnt--;
4205 				} else {
4206 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4207 					asoc->locked_on_sending = NULL;
4208 					asoc->stream_queue_cnt--;
4209 				}
4210 			}
4211 		}
4212 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4213 		    (asoc->stream_queue_cnt == 0)) {
4214 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4215 				/* Need to abort here */
4216 				struct mbuf *oper;
4217 
4218 		abort_out_now:
4219 				*abort_now = 1;
4220 				/* XXX */
4221 				oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4222 				    0, M_NOWAIT, 1, MT_DATA);
4223 				if (oper) {
4224 					struct sctp_paramhdr *ph;
4225 
4226 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr);
4227 					ph = mtod(oper, struct sctp_paramhdr *);
4228 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4229 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4230 				}
4231 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4232 				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4233 			} else {
4234 				struct sctp_nets *netp;
4235 
4236 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4237 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4238 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4239 				}
4240 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4241 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4242 				sctp_stop_timers_for_shutdown(stcb);
4243 				if (asoc->alternate) {
4244 					netp = asoc->alternate;
4245 				} else {
4246 					netp = asoc->primary_destination;
4247 				}
4248 				sctp_send_shutdown(stcb, netp);
4249 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4250 				    stcb->sctp_ep, stcb, netp);
4251 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4252 				    stcb->sctp_ep, stcb, netp);
4253 			}
4254 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4255 		    (asoc->stream_queue_cnt == 0)) {
4256 			struct sctp_nets *netp;
4257 
4258 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4259 				goto abort_out_now;
4260 			}
4261 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4262 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4263 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4264 			sctp_stop_timers_for_shutdown(stcb);
4265 			if (asoc->alternate) {
4266 				netp = asoc->alternate;
4267 			} else {
4268 				netp = asoc->primary_destination;
4269 			}
4270 			sctp_send_shutdown_ack(stcb, netp);
4271 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4272 			    stcb->sctp_ep, stcb, netp);
4273 		}
4274 	}
4275 	/*********************************************/
4276 	/* Here we perform PR-SCTP procedures        */
4277 	/* (section 4.2)                             */
4278 	/*********************************************/
4279 	/* C1. update advancedPeerAckPoint */
4280 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4281 		asoc->advanced_peer_ack_point = cumack;
4282 	}
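	/*
	 * Example: if cumack = 100 but TSNs 101-103 were abandoned under
	 * PR-SCTP, advanced_peer_ack_point may move to 103; a FORWARD-TSN
	 * advertising 103 then lets the receiver deliver as if those TSNs
	 * had arrived.
	 */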
4283 	/* PR-SCTP issues need to be addressed too */
4284 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4285 		struct sctp_tmit_chunk *lchk;
4286 		uint32_t old_adv_peer_ack_point;
4287 
4288 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4289 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4290 		/* C3. See if we need to send a Fwd-TSN */
4291 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4292 			/*
4293 			 * ISSUE with ECN, see FWD-TSN processing.
4294 			 */
4295 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4296 				send_forward_tsn(stcb, asoc);
4297 			} else if (lchk) {
4298 				/* try to FR fwd-tsn's that get lost too */
4299 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4300 					send_forward_tsn(stcb, asoc);
4301 				}
4302 			}
4303 		}
4304 		if (lchk) {
4305 			/* Assure a timer is up */
4306 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4307 			    stcb->sctp_ep, stcb, lchk->whoTo);
4308 		}
4309 	}
4310 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4311 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4312 		    rwnd,
4313 		    stcb->asoc.peers_rwnd,
4314 		    stcb->asoc.total_flight,
4315 		    stcb->asoc.total_output_queue_size);
4316 	}
4317 }
4318 
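/*
 * Slow SACK path: full processing including gap ack blocks, nr-gap
 * blocks and duplicate TSN reports.  The chunk layout being parsed here
 * is roughly (sketch only, not the kernel's actual declarations):
 *
 *	cum TSN ack (4) | a_rwnd (4) | num gap blocks (2) | num dups (2)
 *	num_seg/num_nr_seg gap blocks: start (2) | end (2), cum-ack relative
 *	num_dup duplicate TSNs: 4 bytes each
 */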
4319 void
4320 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4321     struct sctp_tcb *stcb,
4322     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4323     int *abort_now, uint8_t flags,
4324     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4325 {
4326 	struct sctp_association *asoc;
4327 	struct sctp_tmit_chunk *tp1, *tp2;
4328 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4329 	uint16_t wake_him = 0;
4330 	uint32_t send_s = 0;
4331 	long j;
4332 	int accum_moved = 0;
4333 	int will_exit_fast_recovery = 0;
4334 	uint32_t a_rwnd, old_rwnd;
4335 	int win_probe_recovery = 0;
4336 	int win_probe_recovered = 0;
4337 	struct sctp_nets *net = NULL;
4338 	int done_once;
4339 	int rto_ok = 1;
4340 	uint8_t reneged_all = 0;
4341 	uint8_t cmt_dac_flag;
4342 
4343 	/*
4344 	 * we take any chance we can to service our queues since we cannot
4345 	 * get awoken when the socket is read from :<
4346 	 */
4347 	/*
4348 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4349 	 * old sack, if so discard. 2) If there is nothing left in the send
4350 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4351 	 * too, update any rwnd change and verify no timers are running.
4352 	 * then return. 3) Process any new consecutive data i.e. cum-ack
4353 	 * moved process these first and note that it moved. 4) Process any
4354 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4355 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4356 	 * sync up flightsizes and things, stop all timers and also check
4357 	 * for shutdown_pending state. If so then go ahead and send off the
4358 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4359 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4360 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4361 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4362 	 * if in shutdown_recv state.
4363 	 */
4364 	SCTP_TCB_LOCK_ASSERT(stcb);
4365 	/* CMT DAC algo */
4366 	this_sack_lowest_newack = 0;
4367 	SCTP_STAT_INCR(sctps_slowpath_sack);
4368 	last_tsn = cum_ack;
4369 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4370 #ifdef SCTP_ASOCLOG_OF_TSNS
4371 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4372 	stcb->asoc.cumack_log_at++;
4373 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4374 		stcb->asoc.cumack_log_at = 0;
4375 	}
4376 #endif
4377 	a_rwnd = rwnd;
4378 
4379 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4380 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4381 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4382 	}
4383 	old_rwnd = stcb->asoc.peers_rwnd;
4384 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4385 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4386 		    stcb->asoc.overall_error_count,
4387 		    0,
4388 		    SCTP_FROM_SCTP_INDATA,
4389 		    __LINE__);
4390 	}
4391 	stcb->asoc.overall_error_count = 0;
4392 	asoc = &stcb->asoc;
4393 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4394 		sctp_log_sack(asoc->last_acked_seq,
4395 		    cum_ack,
4396 		    0,
4397 		    num_seg,
4398 		    num_dup,
4399 		    SCTP_LOG_NEW_SACK);
4400 	}
4401 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4402 		uint16_t i;
4403 		uint32_t *dupdata, dblock;
4404 
4405 		for (i = 0; i < num_dup; i++) {
4406 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4407 			    sizeof(uint32_t), (uint8_t *) & dblock);
4408 			if (dupdata == NULL) {
4409 				break;
4410 			}
4411 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4412 		}
4413 	}
4414 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4415 		/* reality check */
4416 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4417 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4418 			    sctpchunk_listhead);
4419 			send_s = tp1->rec.data.TSN_seq + 1;
4420 		} else {
4421 			tp1 = NULL;
4422 			send_s = asoc->sending_seq;
4423 		}
4424 		if (SCTP_TSN_GE(cum_ack, send_s)) {
4425 			struct mbuf *oper;
4426 
4427 			/*
4428 			 * no way, we have not even sent this TSN out yet.
4429 			 * Peer is hopelessly messed up with us.
4430 			 */
4431 			SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4432 			    cum_ack, send_s);
4433 			if (tp1) {
4434 				SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4435 				    tp1->rec.data.TSN_seq, (void *)tp1);
4436 			}
4437 	hopeless_peer:
4438 			*abort_now = 1;
4439 			/* XXX */
4440 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4441 			    0, M_NOWAIT, 1, MT_DATA);
4442 			if (oper) {
4443 				struct sctp_paramhdr *ph;
4444 				uint32_t *ippp;
4445 
4446 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4447 				    sizeof(uint32_t);
4448 				ph = mtod(oper, struct sctp_paramhdr *);
4449 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4450 				ph->param_length = htons(SCTP_BUF_LEN(oper));
4451 				ippp = (uint32_t *) (ph + 1);
4452 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4453 			}
4454 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4455 			sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4456 			return;
4457 		}
4458 	}
4459 	/**********************/
4460 	/* 1) check the range */
4461 	/**********************/
4462 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4463 		/* acking something behind */
4464 		return;
4465 	}
4466 	/* update the Rwnd of the peer */
4467 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4468 	    TAILQ_EMPTY(&asoc->send_queue) &&
4469 	    (asoc->stream_queue_cnt == 0)) {
4470 		/* nothing left on send/sent and strmq */
4471 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4472 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4473 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4474 		}
4475 		asoc->peers_rwnd = a_rwnd;
4476 		if (asoc->sent_queue_retran_cnt) {
4477 			asoc->sent_queue_retran_cnt = 0;
4478 		}
4479 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4480 			/* SWS sender side engages */
4481 			asoc->peers_rwnd = 0;
4482 		}
4483 		/* stop any timers */
4484 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4485 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4486 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4487 			net->partial_bytes_acked = 0;
4488 			net->flight_size = 0;
4489 		}
4490 		asoc->total_flight = 0;
4491 		asoc->total_flight_count = 0;
4492 		return;
4493 	}
4494 	/*
4495 	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4496 	 * things. The total byte count acked is tracked in netAckSz AND
4497 	 * netAck2 is used to track the total bytes acked that are un-
4498 	 * netAck2 is used to track the total bytes acked that are
4499 	 * unambiguous and were never retransmitted. We track these on a per
4500 	 */
4501 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4502 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4503 			/* Drag along the window_tsn for cwr's */
4504 			net->cwr_window_tsn = cum_ack;
4505 		}
4506 		net->prev_cwnd = net->cwnd;
4507 		net->net_ack = 0;
4508 		net->net_ack2 = 0;
4509 
4510 		/*
4511 		 * CMT: Reset CUC and Fast recovery algo variables before
4512 		 * SACK processing
4513 		 */
4514 		net->new_pseudo_cumack = 0;
4515 		net->will_exit_fast_recovery = 0;
4516 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4517 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4518 		}
4519 	}
4520 	/* process the new consecutive TSN first */
4521 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4522 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4523 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4524 				accum_moved = 1;
4525 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4526 					/*
4527 					 * If it is less than ACKED, it is
4528 					 * now no longer in flight. Higher
4529 					 * values may occur during marking
4530 					 */
4531 					if ((tp1->whoTo->dest_state &
4532 					    SCTP_ADDR_UNCONFIRMED) &&
4533 					    (tp1->snd_count < 2)) {
4534 						/*
4535 						 * If there was no retran
4536 						 * and the address is
4537 						 * un-confirmed and we sent
4538 						 * there and are now
4539 						 * sacked... it's confirmed,
4540 						 * mark it so.
4541 						 */
4542 						tp1->whoTo->dest_state &=
4543 						    ~SCTP_ADDR_UNCONFIRMED;
4544 					}
4545 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4546 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4547 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4548 							    tp1->whoTo->flight_size,
4549 							    tp1->book_size,
4550 							    (uintptr_t) tp1->whoTo,
4551 							    tp1->rec.data.TSN_seq);
4552 						}
4553 						sctp_flight_size_decrease(tp1);
4554 						sctp_total_flight_decrease(stcb, tp1);
4555 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4556 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4557 							    tp1);
4558 						}
4559 					}
4560 					tp1->whoTo->net_ack += tp1->send_size;
4561 
4562 					/* CMT SFR and DAC algos */
4563 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4564 					tp1->whoTo->saw_newack = 1;
4565 
4566 					if (tp1->snd_count < 2) {
4567 						/*
4568 						 * True non-retransmitted
4569 						 * chunk
4570 						 */
4571 						tp1->whoTo->net_ack2 +=
4572 						    tp1->send_size;
4573 
4574 						/* update RTO too? */
4575 						if (tp1->do_rtt) {
4576 							if (rto_ok) {
4577 								tp1->whoTo->RTO =
4578 								    sctp_calculate_rto(stcb,
4579 								    asoc, tp1->whoTo,
4580 								    &tp1->sent_rcv_time,
4581 								    sctp_align_safe_nocopy,
4582 								    SCTP_RTT_FROM_DATA);
4583 								rto_ok = 0;
4584 							}
4585 							if (tp1->whoTo->rto_needed == 0) {
4586 								tp1->whoTo->rto_needed = 1;
4587 							}
4588 							tp1->do_rtt = 0;
4589 						}
4590 					}
4591 					/*
4592 					 * CMT: CUCv2 algorithm. From the
4593 					 * cumack'd TSNs, for each TSN being
4594 					 * acked for the first time, set the
4595 					 * following variables for the
4596 					 * corresp destination.
4597 					 * new_pseudo_cumack will trigger a
4598 					 * cwnd update.
4599 					 * find_(rtx_)pseudo_cumack will
4600 					 * trigger search for the next
4601 					 * expected (rtx-)pseudo-cumack.
4602 					 */
4603 					tp1->whoTo->new_pseudo_cumack = 1;
4604 					tp1->whoTo->find_pseudo_cumack = 1;
4605 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4606 
4607 
4608 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4609 						sctp_log_sack(asoc->last_acked_seq,
4610 						    cum_ack,
4611 						    tp1->rec.data.TSN_seq,
4612 						    0,
4613 						    0,
4614 						    SCTP_LOG_TSN_ACKED);
4615 					}
4616 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4617 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4618 					}
4619 				}
4620 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4621 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4622 #ifdef SCTP_AUDITING_ENABLED
4623 					sctp_audit_log(0xB3,
4624 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4625 #endif
4626 				}
4627 				if (tp1->rec.data.chunk_was_revoked) {
4628 					/* deflate the cwnd */
4629 					tp1->whoTo->cwnd -= tp1->book_size;
4630 					tp1->rec.data.chunk_was_revoked = 0;
4631 				}
4632 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4633 					tp1->sent = SCTP_DATAGRAM_ACKED;
4634 				}
4635 			}
4636 		} else {
4637 			break;
4638 		}
4639 	}
4640 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4641 	/* always set this up to cum-ack */
4642 	asoc->this_sack_highest_gap = last_tsn;
4643 
4644 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4645 
4646 		/*
4647 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4648 		 * to be greater than the cumack. Also reset saw_newack to 0
4649 		 * for all dests.
4650 		 */
4651 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4652 			net->saw_newack = 0;
4653 			net->this_sack_highest_newack = last_tsn;
4654 		}
4655 
4656 		/*
4657 		 * thisSackHighestGap will increase while handling NEW
4658 		 * segments. this_sack_highest_newack will increase while
4659 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4660 		 * used for CMT DAC algo. saw_newack will also change.
4661 		 */
4662 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4663 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4664 		    num_seg, num_nr_seg, &rto_ok)) {
4665 			wake_him++;
4666 		}
4667 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4668 			/*
4669 			 * validate the biggest_tsn_acked in the gap acks if
4670 			 * strict adherence is wanted.
4671 			 */
4672 			if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4673 				/*
4674 				 * peer is either confused or we are under
4675 				 * attack. We must abort.
4676 				 */
4677 				SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4678 				    biggest_tsn_acked, send_s);
4679 				goto hopeless_peer;
4680 			}
4681 		}
4682 	}
4683 	/*******************************************/
4684 	/* cancel ALL T3-send timer if accum moved */
4685 	/*******************************************/
4686 	if (asoc->sctp_cmt_on_off > 0) {
4687 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4688 			if (net->new_pseudo_cumack)
4689 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4690 				    stcb, net,
4691 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4692 
4693 		}
4694 	} else {
4695 		if (accum_moved) {
4696 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4697 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4698 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4699 			}
4700 		}
4701 	}
4702 	/********************************************/
4703 	/* drop the acked chunks from the sentqueue */
4704 	/********************************************/
4705 	asoc->last_acked_seq = cum_ack;
4706 
4707 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4708 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4709 			break;
4710 		}
4711 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4712 			if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4713 				asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4714 #ifdef INVARIANTS
4715 			} else {
4716 				panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4717 #endif
4718 			}
4719 		}
4720 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4721 		if (PR_SCTP_ENABLED(tp1->flags)) {
4722 			if (asoc->pr_sctp_cnt != 0)
4723 				asoc->pr_sctp_cnt--;
4724 		}
4725 		asoc->sent_queue_cnt--;
4726 		if (tp1->data) {
4727 			/* sa_ignore NO_NULL_CHK */
4728 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4729 			sctp_m_freem(tp1->data);
4730 			tp1->data = NULL;
4731 			if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4732 				asoc->sent_queue_cnt_removeable--;
4733 			}
4734 		}
4735 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4736 			sctp_log_sack(asoc->last_acked_seq,
4737 			    cum_ack,
4738 			    tp1->rec.data.TSN_seq,
4739 			    0,
4740 			    0,
4741 			    SCTP_LOG_FREE_SENT);
4742 		}
4743 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4744 		wake_him++;
4745 	}
4746 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4747 #ifdef INVARIANTS
4748 		panic("Warning flight size is positive and should be 0");
4749 #else
4750 		SCTP_PRINTF("Warning: flight size should be 0 but is %d\n",
4751 		    asoc->total_flight);
4752 #endif
4753 		asoc->total_flight = 0;
4754 	}
4755 	/* sa_ignore NO_NULL_CHK */
4756 	if ((wake_him) && (stcb->sctp_socket)) {
4757 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4758 		struct socket *so;
4759 
4760 #endif
4761 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4762 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4763 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4764 		}
4765 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4766 		so = SCTP_INP_SO(stcb->sctp_ep);
4767 		atomic_add_int(&stcb->asoc.refcnt, 1);
4768 		SCTP_TCB_UNLOCK(stcb);
4769 		SCTP_SOCKET_LOCK(so, 1);
4770 		SCTP_TCB_LOCK(stcb);
4771 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4772 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4773 			/* assoc was freed while we were unlocked */
4774 			SCTP_SOCKET_UNLOCK(so, 1);
4775 			return;
4776 		}
4777 #endif
4778 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4779 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4780 		SCTP_SOCKET_UNLOCK(so, 1);
4781 #endif
4782 	} else {
4783 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4784 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4785 		}
4786 	}
4787 
4788 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4789 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4790 			/* Setup so we will exit RFC2582 fast recovery */
4791 			will_exit_fast_recovery = 1;
4792 		}
4793 	}
4794 	/*
4795 	 * Check for revoked fragments:
4796 	 *
4797 	 * If the previous SACK had no frags, then nothing can have been
4798 	 * revoked. If the previous SACK did have frags, then: if we now
4799 	 * have frags (num_seg > 0), call sctp_check_for_revoked() to tell
4800 	 * if the peer revoked some of them; otherwise the peer revoked all
4801 	 * ACKED fragments, since we had some before and now we have NONE.
4802 	 */
4803 
4804 	if (num_seg) {
4805 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4806 		asoc->saw_sack_with_frags = 1;
4807 	} else if (asoc->saw_sack_with_frags) {
4808 		int cnt_revoked = 0;
4809 
4810 		/* Peer revoked all datagrams marked or acked */
4811 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4812 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4813 				tp1->sent = SCTP_DATAGRAM_SENT;
4814 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4815 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4816 					    tp1->whoTo->flight_size,
4817 					    tp1->book_size,
4818 					    (uintptr_t) tp1->whoTo,
4819 					    tp1->rec.data.TSN_seq);
4820 				}
4821 				sctp_flight_size_increase(tp1);
4822 				sctp_total_flight_increase(stcb, tp1);
4823 				tp1->rec.data.chunk_was_revoked = 1;
4824 				/*
4825 				 * To ensure that this increase in
4826 				 * flightsize, which is artificial, does not
4827 				 * throttle the sender, we also increase the
4828 				 * cwnd artificially.
4829 				 */
4830 				tp1->whoTo->cwnd += tp1->book_size;
4831 				cnt_revoked++;
4832 			}
4833 		}
4834 		if (cnt_revoked) {
4835 			reneged_all = 1;
4836 		}
4837 		asoc->saw_sack_with_frags = 0;
4838 	}
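	/*
	 * (Note the symmetry with the cwnd deflate when an acked chunk has
	 * chunk_was_revoked set: a revoked chunk artificially re-enters
	 * the flight and inflates cwnd by its book_size here; when it is
	 * finally acked, that cwnd credit is taken back out.)
	 */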
4839 	if (num_nr_seg > 0)
4840 		asoc->saw_sack_with_nr_frags = 1;
4841 	else
4842 		asoc->saw_sack_with_nr_frags = 0;
4843 
4844 	/* JRS - Use the congestion control given in the CC module */
4845 	if (ecne_seen == 0) {
4846 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4847 			if (net->net_ack2 > 0) {
4848 				/*
4849 				 * Karn's rule applies to clearing error
4850 				 * count, this is optional.
4851 				 */
4852 				net->error_count = 0;
4853 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4854 					/* addr came good */
4855 					net->dest_state |= SCTP_ADDR_REACHABLE;
4856 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4857 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4858 				}
4859 				if (net == stcb->asoc.primary_destination) {
4860 					if (stcb->asoc.alternate) {
4861 						/*
4862 						 * release the alternate,
4863 						 * primary is good
4864 						 */
4865 						sctp_free_remote_addr(stcb->asoc.alternate);
4866 						stcb->asoc.alternate = NULL;
4867 					}
4868 				}
4869 				if (net->dest_state & SCTP_ADDR_PF) {
4870 					net->dest_state &= ~SCTP_ADDR_PF;
4871 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4872 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4873 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4874 					/* Done with this net */
4875 					net->net_ack = 0;
4876 				}
4877 				/* restore any doubled timers */
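				/*
				 * (lastsa appears to hold the smoothed RTT
				 * scaled by 2^SCTP_RTT_SHIFT and lastsv the
				 * scaled variance term, so this recomputes
				 * the RFC 4960 RTO = SRTT + 4 * RTTVAR and
				 * then clamps it to [minrto, maxrto].)
				 */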
4878 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4879 				if (net->RTO < stcb->asoc.minrto) {
4880 					net->RTO = stcb->asoc.minrto;
4881 				}
4882 				if (net->RTO > stcb->asoc.maxrto) {
4883 					net->RTO = stcb->asoc.maxrto;
4884 				}
4885 			}
4886 		}
4887 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4888 	}
4889 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4890 		/* nothing left in-flight */
4891 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4892 			/* stop all timers */
4893 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4894 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4895 			net->flight_size = 0;
4896 			net->partial_bytes_acked = 0;
4897 		}
4898 		asoc->total_flight = 0;
4899 		asoc->total_flight_count = 0;
4900 	}
4901 	/**********************************/
4902 	/* Now what about shutdown issues */
4903 	/**********************************/
4904 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4905 		/* nothing left on sendqueue... consider done */
4906 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4907 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4908 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4909 		}
4910 		asoc->peers_rwnd = a_rwnd;
4911 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4912 			/* SWS sender side engages */
4913 			asoc->peers_rwnd = 0;
4914 		}
4915 		/* clean up */
4916 		if ((asoc->stream_queue_cnt == 1) &&
4917 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4918 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4919 		    (asoc->locked_on_sending)
4920 		    ) {
4921 			struct sctp_stream_queue_pending *sp;
4922 
4923 			/*
4924 			 * I may be in a state where we got it all across... but
4925 			 * cannot write more due to a shutdown... we abort
4926 			 * since the user did not indicate EOR in this case.
4927 			 */
4928 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4929 			    sctp_streamhead);
4930 			if ((sp) && (sp->length == 0)) {
4931 				asoc->locked_on_sending = NULL;
4932 				if (sp->msg_is_complete) {
4933 					asoc->stream_queue_cnt--;
4934 				} else {
4935 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4936 					asoc->stream_queue_cnt--;
4937 				}
4938 			}
4939 		}
4940 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4941 		    (asoc->stream_queue_cnt == 0)) {
4942 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4943 				/* Need to abort here */
4944 				struct mbuf *oper;
4945 
4946 		abort_out_now:
4947 				*abort_now = 1;
4948 				/* XXX */
4949 				oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4950 				    0, M_NOWAIT, 1, MT_DATA);
4951 				if (oper) {
4952 					struct sctp_paramhdr *ph;
4953 
4954 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr);
4955 					ph = mtod(oper, struct sctp_paramhdr *);
4956 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4957 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4958 				}
4959 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4960 				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4961 				return;
4962 			} else {
4963 				struct sctp_nets *netp;
4964 
4965 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4966 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4967 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4968 				}
4969 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4970 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4971 				sctp_stop_timers_for_shutdown(stcb);
4972 				if (asoc->alternate) {
4973 					netp = asoc->alternate;
4974 				} else {
4975 					netp = asoc->primary_destination;
4976 				}
4977 				sctp_send_shutdown(stcb, netp);
4978 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4979 				    stcb->sctp_ep, stcb, netp);
4980 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4981 				    stcb->sctp_ep, stcb, netp);
4982 			}
4983 			return;
4984 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4985 		    (asoc->stream_queue_cnt == 0)) {
4986 			struct sctp_nets *netp;
4987 
4988 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4989 				goto abort_out_now;
4990 			}
4991 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4992 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4993 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4994 			sctp_stop_timers_for_shutdown(stcb);
4995 			if (asoc->alternate) {
4996 				netp = asoc->alternate;
4997 			} else {
4998 				netp = asoc->primary_destination;
4999 			}
5000 			sctp_send_shutdown_ack(stcb, netp);
5001 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5002 			    stcb->sctp_ep, stcb, netp);
5003 			return;
5004 		}
5005 	}
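	/*
	 * (Shutdown sketch: once both the send and sent queues drain, a
	 * pending SHUTDOWN is sent and the shutdown timers are started; if
	 * a SHUTDOWN was already received, we answer with SHUTDOWN-ACK
	 * instead. A partially written message left behind forces an
	 * abort, since the user never supplied EOR.)
	 */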
5006 	/*
5007 	 * Now here we are going to recycle net_ack for a different use...
5008 	 * HEADS UP.
5009 	 */
5010 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5011 		net->net_ack = 0;
5012 	}
5013 
5014 	/*
5015 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5016 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5017 	 * automatically ensure that.
5018 	 */
5019 	if ((asoc->sctp_cmt_on_off > 0) &&
5020 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5021 	    (cmt_dac_flag == 0)) {
5022 		this_sack_lowest_newack = cum_ack;
5023 	}
5024 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5025 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5026 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5027 	}
5028 	/* JRS - Use the congestion control given in the CC module */
5029 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5030 
5031 	/* Now are we exiting loss recovery ? */
5032 	if (will_exit_fast_recovery) {
5033 		/* Ok, we must exit fast recovery */
5034 		asoc->fast_retran_loss_recovery = 0;
5035 	}
5036 	if ((asoc->sat_t3_loss_recovery) &&
5037 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5038 		/* end satellite t3 loss recovery */
5039 		asoc->sat_t3_loss_recovery = 0;
5040 	}
5041 	/*
5042 	 * CMT Fast recovery
5043 	 */
5044 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5045 		if (net->will_exit_fast_recovery) {
5046 			/* Ok, we must exit fast recovery */
5047 			net->fast_retran_loss_recovery = 0;
5048 		}
5049 	}
5050 
5051 	/* Adjust and set the new rwnd value */
5052 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5053 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5054 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5055 	}
5056 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5057 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5058 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5059 		/* SWS sender side engages */
5060 		asoc->peers_rwnd = 0;
5061 	}
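	/*
	 * (Classic sender-side silly-window avoidance: a peer window
	 * smaller than sctp_sws_sender is treated as zero, so we probe
	 * rather than dribble out tiny chunks.)
	 */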
5062 	if (asoc->peers_rwnd > old_rwnd) {
5063 		win_probe_recovery = 1;
5064 	}
5065 	/*
5066 	 * Now we must setup so we have a timer up for anyone with
5067 	 * outstanding data.
5068 	 */
5069 	done_once = 0;
5070 again:
5071 	j = 0;
5072 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5073 		if (win_probe_recovery && (net->window_probe)) {
5074 			win_probe_recovered = 1;
5075 			/*-
5076 			 * Find the first chunk that was used with the
5077 			 * window probe and clear the event. Put it
5078 			 * back into the send queue as if it has not
5079 			 * been sent.
5080 			 */
5081 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5082 				if (tp1->window_probe) {
5083 					sctp_window_probe_recovery(stcb, asoc, tp1);
5084 					break;
5085 				}
5086 			}
5087 		}
5088 		if (net->flight_size) {
5089 			j++;
5090 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5091 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5092 				    stcb->sctp_ep, stcb, net);
5093 			}
5094 			if (net->window_probe) {
5095 				net->window_probe = 0;
5096 			}
5097 		} else {
5098 			if (net->window_probe) {
5099 				/*
5100 				 * For window probes we must ensure a timer
5101 				 * is still running there.
5102 				 */
5103 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5104 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5105 					    stcb->sctp_ep, stcb, net);
5106 
5107 				}
5108 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5109 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5110 				    stcb, net,
5111 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5112 			}
5113 		}
5114 	}
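	/*
	 * (Invariant enforced above: every net with data in flight has a
	 * running T3 timer, and a net whose flight drained keeps a timer
	 * only while a window probe is outstanding.)
	 */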
5115 	if ((j == 0) &&
5116 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5117 	    (asoc->sent_queue_retran_cnt == 0) &&
5118 	    (win_probe_recovered == 0) &&
5119 	    (done_once == 0)) {
5120 		/*
5121 		 * Huh, this should not happen unless all packets are
5122 		 * PR-SCTP and marked to be skipped, of course.
5123 		 */
5124 		if (sctp_fs_audit(asoc)) {
5125 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5126 				net->flight_size = 0;
5127 			}
5128 			asoc->total_flight = 0;
5129 			asoc->total_flight_count = 0;
5130 			asoc->sent_queue_retran_cnt = 0;
5131 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5132 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5133 					sctp_flight_size_increase(tp1);
5134 					sctp_total_flight_increase(stcb, tp1);
5135 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5136 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5137 				}
5138 			}
5139 		}
5140 		done_once = 1;
5141 		goto again;
5142 	}
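	/*
	 * (The audit path rebuilds the flight from scratch: zero every
	 * per-net and total counter, then walk the sent queue re-adding
	 * chunks still below RESEND and re-counting the RESEND-marked
	 * ones.)
	 */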
5143 	/*********************************************/
5144 	/* Here we perform PR-SCTP procedures        */
5145 	/* (section 4.2)                             */
5146 	/*********************************************/
5147 	/* C1. update advancedPeerAckPoint */
5148 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5149 		asoc->advanced_peer_ack_point = cum_ack;
5150 	}
5151 	/* C2. try to further move advancedPeerAckPoint ahead */
5152 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5153 		struct sctp_tmit_chunk *lchk;
5154 		uint32_t old_adv_peer_ack_point;
5155 
5156 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5157 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5158 		/* C3. See if we need to send a Fwd-TSN */
5159 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5160 			/*
5161 			 * ISSUE with ECN, see FWD-TSN processing.
5162 			 */
5163 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5164 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5165 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5166 				    old_adv_peer_ack_point);
5167 			}
5168 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5169 				send_forward_tsn(stcb, asoc);
5170 			} else if (lchk) {
5171 				/* try to FR fwd-tsn's that get lost too */
5172 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5173 					send_forward_tsn(stcb, asoc);
5174 				}
5175 			}
5176 		}
5177 		if (lchk) {
5178 			/* Ensure a timer is up */
5179 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5180 			    stcb->sctp_ep, stcb, lchk->whoTo);
5181 		}
5182 	}
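	/*
	 * (In short, a FORWARD-TSN goes out when the advanced peer ack
	 * point moved beyond where it was before this pass, or when the
	 * first pending fwd-tsn-covered chunk appears to have been lost
	 * three times, a fast-retransmit-like rule for lost FORWARD-TSNs.)
	 */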
5183 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5184 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5185 		    a_rwnd,
5186 		    stcb->asoc.peers_rwnd,
5187 		    stcb->asoc.total_flight,
5188 		    stcb->asoc.total_output_queue_size);
5189 	}
5190 }
5191 
5192 void
5193 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5194 {
5195 	/* Copy cum-ack */
5196 	uint32_t cum_ack, a_rwnd;
5197 
5198 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5199 	/* Arrange so a_rwnd does NOT change */
5200 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
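	/*
	 * (Why this keeps a_rwnd constant: the sack handling recomputes
	 * peers_rwnd by subtracting what is still in flight from a_rwnd,
	 * so seeding a_rwnd with peers_rwnd + total_flight cancels that
	 * subtraction out.)
	 */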
5201 
5202 	/* Now call the express sack handling */
5203 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5204 }
5205 
5206 static void
5207 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5208     struct sctp_stream_in *strmin)
5209 {
5210 	struct sctp_queued_to_read *ctl, *nctl;
5211 	struct sctp_association *asoc;
5212 	uint16_t tt;
5213 
5214 	asoc = &stcb->asoc;
5215 	tt = strmin->last_sequence_delivered;
5216 	/*
5217 	 * First deliver anything prior to and including the stream
5218 	 * sequence number that came in.
5219 	 */
5220 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5221 		if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
5222 			/* this is deliverable now */
5223 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5224 			/* subtract pending on streams */
5225 			asoc->size_on_all_streams -= ctl->length;
5226 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5227 			/* deliver it to at least the delivery-q */
5228 			if (stcb->sctp_socket) {
5229 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5230 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5231 				    ctl,
5232 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5233 			}
5234 		} else {
5235 			/* no more delivery now. */
5236 			break;
5237 		}
5238 	}
5239 	/*
5240 	 * Now we must deliver things in the queue the normal way, if any
5241 	 * are now ready.
5242 	 */
5243 	tt = strmin->last_sequence_delivered + 1;
5244 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5245 		if (tt == ctl->sinfo_ssn) {
5246 			/* this is deliverable now */
5247 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5248 			/* subtract pending on streams */
5249 			asoc->size_on_all_streams -= ctl->length;
5250 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5251 			/* deliver it to at least the delivery-q */
5252 			strmin->last_sequence_delivered = ctl->sinfo_ssn;
5253 			if (stcb->sctp_socket) {
5254 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5255 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5256 				    ctl,
5257 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5258 
5259 			}
5260 			tt = strmin->last_sequence_delivered + 1;
5261 		} else {
5262 			break;
5263 		}
5264 	}
5265 }
5266 
5267 static void
5268 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5269     struct sctp_association *asoc,
5270     uint16_t stream, uint16_t seq)
5271 {
5272 	struct sctp_tmit_chunk *chk, *nchk;
5273 
5274 	/* For each one on here see if we need to toss it */
5275 	/*
5276 	 * For now, large messages held on the reasmqueue that are complete
5277 	 * will be tossed too. We could in theory do more work: spin
5278 	 * through, stop after dumping one msg (i.e., on seeing the start
5279 	 * of a new msg at the head), and call the delivery function to see
5280 	 * if it can be delivered. But for now we just dump everything on
5281 	 * the queue.
5282 	 */
5283 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5284 		/*
5285 		 * Do not toss it if on a different stream or marked for
5286 		 * unordered delivery in which case the stream sequence
5287 		 * number has no meaning.
5288 		 */
5289 		if ((chk->rec.data.stream_number != stream) ||
5290 		    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5291 			continue;
5292 		}
5293 		if (chk->rec.data.stream_seq == seq) {
5294 			/* It needs to be tossed */
5295 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5296 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5297 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5298 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5299 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5300 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5301 			}
5302 			asoc->size_on_reasm_queue -= chk->send_size;
5303 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5304 
5305 			/* Clear up any stream problem */
5306 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5307 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5308 				/*
5309 				 * We must dump forward this stream's
5310 				 * sequence number if the chunk being
5311 				 * skipped is not unordered. There is a
5312 				 * chance that if the peer does not include
5313 				 * the last fragment in its FWD-TSN we WILL
5314 				 * have a problem here, since you would have
5315 				 * a partial chunk in the queue that may not
5316 				 * be deliverable. Also, if a partial
5317 				 * delivery API has started, the user may
5318 				 * get a partial chunk. The next read would
5319 				 * return a new chunk... really ugly, but I
5320 				 * see no way around it! Maybe a notify??
5321 				 */
5322 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5323 			}
5324 			if (chk->data) {
5325 				sctp_m_freem(chk->data);
5326 				chk->data = NULL;
5327 			}
5328 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5329 		} else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
5330 			/*
5331 			 * If the stream_seq is > than the purging one, we
5332 			 * are done
5333 			 */
5334 			break;
5335 		}
5336 	}
5337 }
5338 
5339 
5340 void
5341 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5342     struct sctp_forward_tsn_chunk *fwd,
5343     int *abort_flag, struct mbuf *m, int offset)
5344 {
5345 	/* The pr-sctp fwd tsn */
5346 	/*
5347 	 * Here we will perform all the data receiver side steps for
5348 	 * processing FwdTSN, as required by the pr-sctp draft.
5349 	 *
5350 	 * Assume we get FwdTSN(x):
5351 	 * 1) update local cumTSN to x
5352 	 * 2) try to further advance cumTSN to x + others we have
5353 	 * 3) examine and update re-ordering queue on pr-in-streams
5354 	 * 4) clean up re-assembly queue
5355 	 * 5) send a sack to report where we are
5356 	 */
5357 	struct sctp_association *asoc;
5358 	uint32_t new_cum_tsn, gap;
5359 	unsigned int i, fwd_sz, m_size;
5360 	uint32_t str_seq;
5361 	struct sctp_stream_in *strm;
5362 	struct sctp_tmit_chunk *chk, *nchk;
5363 	struct sctp_queued_to_read *ctl, *sv;
5364 
5365 	asoc = &stcb->asoc;
5366 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5367 		SCTPDBG(SCTP_DEBUG_INDATA1,
5368 		    "Bad size too small/big fwd-tsn\n");
5369 		return;
5370 	}
5371 	m_size = (stcb->asoc.mapping_array_size << 3);
5372 	/*************************************************************/
5373 	/* 1. Here we update local cumTSN and shift the bitmap array */
5374 	/*************************************************************/
5375 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5376 
5377 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5378 		/* Already got there ... */
5379 		return;
5380 	}
5381 	/*
5382 	 * now we know the new TSN is more advanced, let's find the actual
5383 	 * gap
5384 	 */
5385 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
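	/*
	 * (For example, with mapping_array_base_tsn = 1000 and new_cum_tsn
	 * = 1003 the gap is 3: the new cum-ack sits three slots into the
	 * bitmap. SCTP_CALC_TSN_TO_GAP uses serial arithmetic, so TSN
	 * wrap-around is handled.)
	 */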
5386 	asoc->cumulative_tsn = new_cum_tsn;
5387 	if (gap >= m_size) {
5388 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5389 			struct mbuf *oper;
5390 
5391 			/*
5392 			 * out of range (in terms of single-byte chunks in the
5393 			 * rwnd I give out). This must be an attacker.
5394 			 */
5395 			*abort_flag = 1;
5396 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5397 			    0, M_NOWAIT, 1, MT_DATA);
5398 			if (oper) {
5399 				struct sctp_paramhdr *ph;
5400 				uint32_t *ippp;
5401 
5402 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5403 				    (sizeof(uint32_t) * 3);
5404 				ph = mtod(oper, struct sctp_paramhdr *);
5405 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5406 				ph->param_length = htons(SCTP_BUF_LEN(oper));
5407 				ippp = (uint32_t *) (ph + 1);
5408 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5409 				ippp++;
5410 				*ippp = asoc->highest_tsn_inside_map;
5411 				ippp++;
5412 				*ippp = new_cum_tsn;
5413 			}
5414 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5415 			sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
5416 			return;
5417 		}
5418 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5419 
5420 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5421 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5422 		asoc->highest_tsn_inside_map = new_cum_tsn;
5423 
5424 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5425 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5426 
5427 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5428 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5429 		}
5430 	} else {
5431 		SCTP_TCB_LOCK_ASSERT(stcb);
5432 		for (i = 0; i <= gap; i++) {
5433 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5434 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5435 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5436 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5437 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5438 				}
5439 			}
5440 		}
5441 	}
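	/*
	 * (Either way, every TSN at or below the new cum-ack ends up
	 * marked as received: a gap past the map's edge resets both maps
	 * to start just above new_cum_tsn, while a gap inside the map
	 * sets the covered bits in the nr-mapping array.)
	 */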
5442 	/*************************************************************/
5443 	/* 2. Clear up re-assembly queue                             */
5444 	/*************************************************************/
5445 	/*
5446 	 * First service it if pd-api is up, just in case we can progress it
5447 	 * forward
5448 	 */
5449 	if (asoc->fragmented_delivery_inprogress) {
5450 		sctp_service_reassembly(stcb, asoc);
5451 	}
5452 	/* For each one on here see if we need to toss it */
5453 	/*
5454 	 * For now, large messages held on the reasmqueue that are complete
5455 	 * will be tossed too. We could in theory do more work: spin
5456 	 * through, stop after dumping one msg (i.e., on seeing the start
5457 	 * of a new msg at the head), and call the delivery function to see
5458 	 * if it can be delivered. But for now we just dump everything on
5459 	 * the queue.
5460 	 */
5461 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5462 		if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5463 			/* It needs to be tossed */
5464 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5465 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5466 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5467 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5468 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5469 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5470 			}
5471 			asoc->size_on_reasm_queue -= chk->send_size;
5472 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5473 
5474 			/* Clear up any stream problem */
5475 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5476 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5477 				/*
5478 				 * We must dump forward this stream's
5479 				 * sequence number if the chunk being
5480 				 * skipped is not unordered. There is a
5481 				 * chance that if the peer does not include
5482 				 * the last fragment in its FWD-TSN we WILL
5483 				 * have a problem here, since you would have
5484 				 * a partial chunk in the queue that may not
5485 				 * be deliverable. Also, if a partial
5486 				 * delivery API has started, the user may
5487 				 * get a partial chunk. The next read would
5488 				 * return a new chunk... really ugly, but I
5489 				 * see no way around it! Maybe a notify??
5490 				 */
5491 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5492 			}
5493 			if (chk->data) {
5494 				sctp_m_freem(chk->data);
5495 				chk->data = NULL;
5496 			}
5497 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5498 		} else {
5499 			/*
5500 			 * Ok we have gone beyond the end of the fwd-tsn's
5501 			 * mark.
5502 			 */
5503 			break;
5504 		}
5505 	}
5506 	/*******************************************************/
5507 	/* 3. Update the PR-stream re-ordering queues and fix  */
5508 	/* delivery issues as needed.                       */
5509 	/*******************************************************/
5510 	fwd_sz -= sizeof(*fwd);
5511 	if (m && fwd_sz) {
5512 		/* New method. */
5513 		unsigned int num_str;
5514 		struct sctp_strseq *stseq, strseqbuf;
5515 
5516 		offset += sizeof(*fwd);
5517 
5518 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5519 		num_str = fwd_sz / sizeof(struct sctp_strseq);
5520 		for (i = 0; i < num_str; i++) {
5521 			uint16_t st;
5522 
5523 			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5524 			    sizeof(struct sctp_strseq),
5525 			    (uint8_t *) & strseqbuf);
5526 			offset += sizeof(struct sctp_strseq);
5527 			if (stseq == NULL) {
5528 				break;
5529 			}
5530 			/* Convert to host byte order. */
5531 			st = ntohs(stseq->stream);
5532 			stseq->stream = st;
5533 			st = ntohs(stseq->sequence);
5534 			stseq->sequence = st;
5535 
5536 			/* now process */
5537 
5538 			/*
5539 			 * Ok we now look for the stream/seq on the read
5540 			 * queue where it's not all delivered. If we find it
5541 			 * we transmute the read entry into a PDI_ABORTED.
5542 			 */
5543 			if (stseq->stream >= asoc->streamincnt) {
5544 				/* screwed up streams, stop!  */
5545 				break;
5546 			}
5547 			if ((asoc->str_of_pdapi == stseq->stream) &&
5548 			    (asoc->ssn_of_pdapi == stseq->sequence)) {
5549 				/*
5550 				 * If this is the one we were partially
5551 				 * delivering now then we no longer are.
5552 				 * Note this will change with the reassembly
5553 				 * re-write.
5554 				 */
5555 				asoc->fragmented_delivery_inprogress = 0;
5556 			}
5557 			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5558 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5559 				if ((ctl->sinfo_stream == stseq->stream) &&
5560 				    (ctl->sinfo_ssn == stseq->sequence)) {
5561 					str_seq = (stseq->stream << 16) | stseq->sequence;
5562 					ctl->end_added = 1;
5563 					ctl->pdapi_aborted = 1;
5564 					sv = stcb->asoc.control_pdapi;
5565 					stcb->asoc.control_pdapi = ctl;
5566 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5567 					    stcb,
5568 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5569 					    (void *)&str_seq,
5570 					    SCTP_SO_NOT_LOCKED);
5571 					stcb->asoc.control_pdapi = sv;
5572 					break;
5573 				} else if ((ctl->sinfo_stream == stseq->stream) &&
5574 				    SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5575 					/* We are past our victim SSN */
5576 					break;
5577 				}
5578 			}
5579 			strm = &asoc->strmin[stseq->stream];
5580 			if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5581 				/* Update the sequence number */
5582 				strm->last_sequence_delivered = stseq->sequence;
5583 			}
5584 			/* now kick the stream the new way */
5585 			/* sa_ignore NO_NULL_CHK */
5586 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5587 		}
5588 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5589 	}
5590 	/*
5591 	 * Now slide thing forward.
5592 	 */
5593 	sctp_slide_mapping_arrays(stcb);
5594 
5595 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5596 		/* now lets kick out and check for more fragmented delivery */
5597 		/* sa_ignore NO_NULL_CHK */
5598 		sctp_deliver_reasm_check(stcb, &stcb->asoc);
5599 	}
5600 }
5601