xref: /freebsd/sys/netinet/sctp_indata.c (revision 0f2bd1e89db1a2f09268edea21e0ead329e092df)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 
48 
49 /*
50  * NOTES: On the outbound side of things I need to check the sack timer to
51  * see if I should generate a SACK into the chunk queue (if I have data to
52  * send, that is) so that it can be bundled.
53  *
54  * The callback in sctp_usrreq.c will get called when the socket is read from.
55  * This will cause sctp_service_queues() to get called on the top entry in
56  * the list.
57  */
58 
59 void
60 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
61 {
62 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
63 }
64 
65 /* Calculate what the rwnd would be */
66 uint32_t
67 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
68 {
69 	uint32_t calc = 0;
70 
71 	/*
72 	 * This is really set wrong with respect to a 1-2-m socket, since
73 	 * sb_cc is the count that everyone has put up. When we re-write
74 	 * sctp_soreceive we will fix this so that ONLY this
75 	 * association's data is taken into account.
76 	 */
77 	if (stcb->sctp_socket == NULL)
78 		return (calc);
79 
80 	if (stcb->asoc.sb_cc == 0 &&
81 	    asoc->size_on_reasm_queue == 0 &&
82 	    asoc->size_on_all_streams == 0) {
83 		/* Full rwnd granted */
84 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
85 		return (calc);
86 	}
87 	/* get actual space */
88 	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
89 
90 	/*
91 	 * Take out what has NOT been put on the socket queue and what we
92 	 * still hold for putting up.
93 	 */
94 	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
95 	    asoc->cnt_on_reasm_queue * MSIZE));
96 	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
97 	    asoc->cnt_on_all_streams * MSIZE));
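	/*
	 * Illustrative arithmetic (hypothetical numbers, not from the code):
	 * each queued chunk is charged its payload plus roughly one mbuf
	 * (MSIZE) of overhead, so 10 chunks of 100 bytes with an MSIZE of
	 * 256 reduce the advertised window by 10 * 100 + 10 * 256 = 3560
	 * bytes.
	 */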
98 
99 	if (calc == 0) {
100 		/* out of space */
101 		return (calc);
102 	}
103 	/* what is the overhead of all these rwnd's */
104 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
105 	/*
106 	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
107 	 * even if it is 0, to keep SWS avoidance engaged.
108 	 */
109 	if (calc < stcb->asoc.my_rwnd_control_len) {
110 		calc = 1;
111 	}
112 	return (calc);
113 }
114 
115 
116 
117 /*
118  * Build out our readq entry based on the incoming packet.
119  */
120 struct sctp_queued_to_read *
121 sctp_build_readq_entry(struct sctp_tcb *stcb,
122     struct sctp_nets *net,
123     uint32_t tsn, uint32_t ppid,
124     uint32_t context, uint16_t stream_no,
125     uint16_t stream_seq, uint8_t flags,
126     struct mbuf *dm)
127 {
128 	struct sctp_queued_to_read *read_queue_e = NULL;
129 
130 	sctp_alloc_a_readq(stcb, read_queue_e);
131 	if (read_queue_e == NULL) {
132 		goto failed_build;
133 	}
134 	read_queue_e->sinfo_stream = stream_no;
135 	read_queue_e->sinfo_ssn = stream_seq;
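	/*
	 * The chunk's receive flags ride in the upper byte of sinfo_flags;
	 * e.g. the SCTP_DATA_UNORDERED chunk flag (0x04) shifted left by 8
	 * should line up with the user-visible SCTP_UNORDERED bit.
	 */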
136 	read_queue_e->sinfo_flags = (flags << 8);
137 	read_queue_e->sinfo_ppid = ppid;
138 	read_queue_e->sinfo_context = stcb->asoc.context;
139 	read_queue_e->sinfo_timetolive = 0;
140 	read_queue_e->sinfo_tsn = tsn;
141 	read_queue_e->sinfo_cumtsn = tsn;
142 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
143 	read_queue_e->whoFrom = net;
144 	read_queue_e->length = 0;
145 	atomic_add_int(&net->ref_count, 1);
146 	read_queue_e->data = dm;
147 	read_queue_e->spec_flags = 0;
148 	read_queue_e->tail_mbuf = NULL;
149 	read_queue_e->aux_data = NULL;
150 	read_queue_e->stcb = stcb;
151 	read_queue_e->port_from = stcb->rport;
152 	read_queue_e->do_not_ref_stcb = 0;
153 	read_queue_e->end_added = 0;
154 	read_queue_e->some_taken = 0;
155 	read_queue_e->pdapi_aborted = 0;
156 failed_build:
157 	return (read_queue_e);
158 }
159 
160 
161 /*
162  * Build out our readq entry based on a queued chunk.
163  */
164 static struct sctp_queued_to_read *
165 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
166     struct sctp_tmit_chunk *chk)
167 {
168 	struct sctp_queued_to_read *read_queue_e = NULL;
169 
170 	sctp_alloc_a_readq(stcb, read_queue_e);
171 	if (read_queue_e == NULL) {
172 		goto failed_build;
173 	}
174 	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
175 	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
176 	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
177 	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
178 	read_queue_e->sinfo_context = stcb->asoc.context;
179 	read_queue_e->sinfo_timetolive = 0;
180 	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
181 	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
182 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
183 	read_queue_e->whoFrom = chk->whoTo;
184 	read_queue_e->aux_data = NULL;
185 	read_queue_e->length = 0;
186 	atomic_add_int(&chk->whoTo->ref_count, 1);
187 	read_queue_e->data = chk->data;
188 	read_queue_e->tail_mbuf = NULL;
189 	read_queue_e->stcb = stcb;
190 	read_queue_e->port_from = stcb->rport;
191 	read_queue_e->spec_flags = 0;
192 	read_queue_e->do_not_ref_stcb = 0;
193 	read_queue_e->end_added = 0;
194 	read_queue_e->some_taken = 0;
195 	read_queue_e->pdapi_aborted = 0;
196 failed_build:
197 	return (read_queue_e);
198 }
199 
200 
201 struct mbuf *
202 sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
203     struct sctp_sndrcvinfo *sinfo)
204 {
205 	struct sctp_sndrcvinfo *outinfo;
206 	struct cmsghdr *cmh;
207 	struct mbuf *ret;
208 	int len;
209 	int use_extended = 0;
210 
211 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
212 		/* user does not want the sndrcv ctl */
213 		return (NULL);
214 	}
215 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
216 		use_extended = 1;
217 		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
218 	} else {
219 		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
220 	}
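	/*
	 * Note that CMSG_LEN() already accounts for the aligned cmsghdr in
	 * front of the payload, so len covers both the header and the
	 * sndrcvinfo structure copied below.
	 */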
221 
222 
223 	ret = sctp_get_mbuf_for_msg(len,
224 	    0, M_DONTWAIT, 1, MT_DATA);
225 
226 	if (ret == NULL) {
227 		/* No space */
228 		return (ret);
229 	}
230 	/* We need a CMSG header followed by the struct */
231 	cmh = mtod(ret, struct cmsghdr *);
232 	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
233 	cmh->cmsg_level = IPPROTO_SCTP;
234 	if (use_extended) {
235 		cmh->cmsg_type = SCTP_EXTRCV;
236 		cmh->cmsg_len = len;
237 		memcpy(outinfo, sinfo, len);
238 	} else {
239 		cmh->cmsg_type = SCTP_SNDRCV;
240 		cmh->cmsg_len = len;
241 		*outinfo = *sinfo;
242 	}
243 	SCTP_BUF_LEN(ret) = cmh->cmsg_len;
244 	return (ret);
245 }
246 
247 
248 char *
249 sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
250     int *control_len,
251     struct sctp_sndrcvinfo *sinfo)
252 {
253 	struct sctp_sndrcvinfo *outinfo;
254 	struct cmsghdr *cmh;
255 	char *buf;
256 	int len;
257 	int use_extended = 0;
258 
259 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
260 		/* user does not want the sndrcv ctl */
261 		return (NULL);
262 	}
263 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
264 		use_extended = 1;
265 		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
266 	} else {
267 		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
268 	}
269 	SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
270 	if (buf == NULL) {
271 		/* No space */
272 		return (buf);
273 	}
274 	/* We need a CMSG header followed by the struct */
275 	cmh = (struct cmsghdr *)buf;
276 	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
277 	cmh->cmsg_level = IPPROTO_SCTP;
278 	if (use_extended) {
279 		cmh->cmsg_type = SCTP_EXTRCV;
280 		cmh->cmsg_len = len;
281 		memcpy(outinfo, sinfo, len);
282 	} else {
283 		cmh->cmsg_type = SCTP_SNDRCV;
284 		cmh->cmsg_len = len;
285 		*outinfo = *sinfo;
286 	}
287 	*control_len = len;
288 	return (buf);
289 }
290 
291 static void
292 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
293 {
294 	uint32_t gap, i, cumackp1;
295 	int fnd = 0;
296 
297 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
298 		return;
299 	}
300 	cumackp1 = asoc->cumulative_tsn + 1;
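	/*
	 * compare_with_wrap() does serial-number style comparison, so the
	 * ordering tests below remain correct when the 32-bit TSN space
	 * wraps (a small TSN just past zero compares greater than one just
	 * below 0xffffffff).
	 */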
301 	if (compare_with_wrap(cumackp1, tsn, MAX_TSN)) {
302 		/*
303 		 * this TSN is behind the cum-ack and thus we don't need to
304 		 * worry about it being moved from one map to the other.
305 		 */
306 		return;
307 	}
308 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
309 	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
310 		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
311 		sctp_print_mapping_array(asoc);
312 #ifdef INVARIANTS
313 		panic("Things are really messed up now!!");
314 #endif
315 	}
316 	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
317 	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
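	/*
	 * The two lines above move the TSN's bit from the revokable
	 * mapping_array to the non-revokable nr_mapping_array, so a later
	 * drain can no longer renege on it.
	 */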
318 	if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
319 		asoc->highest_tsn_inside_nr_map = tsn;
320 	}
321 	if (tsn == asoc->highest_tsn_inside_map) {
322 		/* We must back down to see what the new highest is */
323 		for (i = tsn - 1; (compare_with_wrap(i, asoc->mapping_array_base_tsn, MAX_TSN) ||
324 		    (i == asoc->mapping_array_base_tsn)); i--) {
325 			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
326 			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
327 				asoc->highest_tsn_inside_map = i;
328 				fnd = 1;
329 				break;
330 			}
331 		}
332 		if (!fnd) {
333 			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
334 		}
335 	}
336 }
337 
338 
339 /*
340  * We are delivering currently from the reassembly queue. We must continue to
341  * deliver until we either: 1) run out of space. 2) run out of sequential
342  * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
343  */
344 static void
345 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
346 {
347 	struct sctp_tmit_chunk *chk;
348 	uint16_t nxt_todel;
349 	uint16_t stream_no;
350 	int end = 0;
351 	int cntDel;
352 
353 	struct sctp_queued_to_read *control, *ctl, *ctlat;
354 
355 	if (stcb == NULL)
356 		return;
357 
358 	cntDel = stream_no = 0;
359 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
360 	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
361 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
362 		/* The socket above is long gone or going away. */
363 abandon:
364 		asoc->fragmented_delivery_inprogress = 0;
365 		chk = TAILQ_FIRST(&asoc->reasmqueue);
366 		while (chk) {
367 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
368 			asoc->size_on_reasm_queue -= chk->send_size;
369 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
370 			/*
371 			 * Lose the data pointer, since it's in the socket
372 			 * buffer.
373 			 */
374 			if (chk->data) {
375 				sctp_m_freem(chk->data);
376 				chk->data = NULL;
377 			}
378 			/* Now free the address and data */
379 			sctp_free_a_chunk(stcb, chk);
380 			/* sa_ignore FREED_MEMORY */
381 			chk = TAILQ_FIRST(&asoc->reasmqueue);
382 		}
383 		return;
384 	}
385 	SCTP_TCB_LOCK_ASSERT(stcb);
386 	do {
387 		chk = TAILQ_FIRST(&asoc->reasmqueue);
388 		if (chk == NULL) {
389 			return;
390 		}
391 		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
392 			/* Can't deliver more :< */
393 			return;
394 		}
395 		stream_no = chk->rec.data.stream_number;
396 		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
397 		if (nxt_todel != chk->rec.data.stream_seq &&
398 		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
399 			/*
400 			 * Not the next sequence to deliver in its stream and
401 			 * not unordered.
402 			 */
403 			return;
404 		}
405 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
406 
407 			control = sctp_build_readq_entry_chk(stcb, chk);
408 			if (control == NULL) {
409 				/* out of memory? */
410 				return;
411 			}
412 			/* save it off for our future deliveries */
413 			stcb->asoc.control_pdapi = control;
414 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
415 				end = 1;
416 			else
417 				end = 0;
418 			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
419 			sctp_add_to_readq(stcb->sctp_ep,
420 			    stcb, control, &stcb->sctp_socket->so_rcv, end,
421 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
422 			cntDel++;
423 		} else {
424 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
425 				end = 1;
426 			else
427 				end = 0;
428 			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
429 			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
430 			    stcb->asoc.control_pdapi,
431 			    chk->data, end, chk->rec.data.TSN_seq,
432 			    &stcb->sctp_socket->so_rcv)) {
433 				/*
434 				 * something is very wrong, either
435 				 * control_pdapi is NULL, or the tail_mbuf
436 				 * is corrupt, or there is an EOM already on
437 				 * the mbuf chain.
438 				 */
439 				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
440 					goto abandon;
441 				} else {
442 #ifdef INVARIANTS
443 					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
444 						panic("This should not happen control_pdapi NULL?");
445 					}
446 					/* if we did not panic, it was an EOM */
447 					panic("Bad chunking ??");
448 #else
449 					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
450 						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
451 					}
452 					SCTP_PRINTF("Bad chunking ??\n");
453 					SCTP_PRINTF("Dumping re-assembly queue, this will probably hose the association\n");
454 
455 #endif
456 					goto abandon;
457 				}
458 			}
459 			cntDel++;
460 		}
461 		/* pull it off, we delivered it */
462 		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
463 		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
464 			asoc->fragmented_delivery_inprogress = 0;
465 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
466 				asoc->strmin[stream_no].last_sequence_delivered++;
467 			}
468 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
469 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
470 			}
471 		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
472 			/*
473 			 * turn the flag back on since we just delivered
474 			 * yet another one.
475 			 */
476 			asoc->fragmented_delivery_inprogress = 1;
477 		}
478 		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
479 		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
480 		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
481 		asoc->last_strm_no_delivered = chk->rec.data.stream_number;
482 
483 		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
484 		asoc->size_on_reasm_queue -= chk->send_size;
485 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
486 		/* free up the chk */
487 		chk->data = NULL;
488 		sctp_free_a_chunk(stcb, chk);
489 
490 		if (asoc->fragmented_delivery_inprogress == 0) {
491 			/*
492 			 * Now let's see if we can deliver the next one on
493 			 * the stream
494 			 */
495 			struct sctp_stream_in *strm;
496 
497 			strm = &asoc->strmin[stream_no];
498 			nxt_todel = strm->last_sequence_delivered + 1;
499 			ctl = TAILQ_FIRST(&strm->inqueue);
500 			if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
501 				while (ctl != NULL) {
502 					/* Deliver more if we can. */
503 					if (nxt_todel == ctl->sinfo_ssn) {
504 						ctlat = TAILQ_NEXT(ctl, next);
505 						TAILQ_REMOVE(&strm->inqueue, ctl, next);
506 						asoc->size_on_all_streams -= ctl->length;
507 						sctp_ucount_decr(asoc->cnt_on_all_streams);
508 						strm->last_sequence_delivered++;
509 						sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
510 						sctp_add_to_readq(stcb->sctp_ep, stcb,
511 						    ctl,
512 						    &stcb->sctp_socket->so_rcv, 1,
513 						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
514 						ctl = ctlat;
515 					} else {
516 						break;
517 					}
518 					nxt_todel = strm->last_sequence_delivered + 1;
519 				}
520 			}
521 			break;
522 		}
523 		/* sa_ignore FREED_MEMORY */
524 		chk = TAILQ_FIRST(&asoc->reasmqueue);
525 	} while (chk);
526 }
527 
528 /*
529  * Queue the chunk either right into the socket buffer if it is the next one
530  * to go OR put it in the correct place in the delivery queue.  If we do
531  * append to the so_buf, keep doing so until we are out of order. One big
532  * question still remains, what to do when the socket buffer is FULL??
533  */
534 static void
535 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
536     struct sctp_queued_to_read *control, int *abort_flag)
537 {
538 	/*
539 	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
540 	 * all the data in one stream this could happen quite rapidly. One
541 	 * could use the TSN to keep track of things, but this scheme breaks
542 	 * down in the other type of stream usage that could occur. Send a
543 	 * single msg to stream 0, send 4 billion messages to stream 1, now
544 	 * send a message to stream 0. You have a situation where the TSN
545 	 * has wrapped but not in the stream. Is this worth worrying about,
546 	 * or should we just change our queue sort at the bottom to be by
547 	 * TSN?
548 	 *
549 	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
550 	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
551 	 * assignment this could happen... and I don't see how this would be
552 	 * a violation. So for now I am undecided and will leave the sort by
553 	 * SSN alone. Maybe a hybrid approach is the answer.
554 	 *
555 	 */
556 	struct sctp_stream_in *strm;
557 	struct sctp_queued_to_read *at;
558 	int queue_needed;
559 	uint16_t nxt_todel;
560 	struct mbuf *oper;
561 
562 	queue_needed = 1;
563 	asoc->size_on_all_streams += control->length;
564 	sctp_ucount_incr(asoc->cnt_on_all_streams);
565 	strm = &asoc->strmin[control->sinfo_stream];
566 	nxt_todel = strm->last_sequence_delivered + 1;
567 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
568 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
569 	}
570 	SCTPDBG(SCTP_DEBUG_INDATA1,
571 	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
572 	    (uint32_t) control->sinfo_stream,
573 	    (uint32_t) strm->last_sequence_delivered,
574 	    (uint32_t) nxt_todel);
575 	if (compare_with_wrap(strm->last_sequence_delivered,
576 	    control->sinfo_ssn, MAX_SEQ) ||
577 	    (strm->last_sequence_delivered == control->sinfo_ssn)) {
578 		/* The incoming sseq is behind where we last delivered? */
579 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
580 		    control->sinfo_ssn, strm->last_sequence_delivered);
581 protocol_error:
582 		/*
583 		 * throw it in the stream so it gets cleaned up in
584 		 * association destruction
585 		 */
586 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
587 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
588 		    0, M_DONTWAIT, 1, MT_DATA);
589 		if (oper) {
590 			struct sctp_paramhdr *ph;
591 			uint32_t *ippp;
592 
593 			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
594 			    (sizeof(uint32_t) * 3);
595 			ph = mtod(oper, struct sctp_paramhdr *);
596 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
597 			ph->param_length = htons(SCTP_BUF_LEN(oper));
598 			ippp = (uint32_t *) (ph + 1);
599 			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
600 			ippp++;
601 			*ippp = control->sinfo_tsn;
602 			ippp++;
603 			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
604 		}
605 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
606 		sctp_abort_an_association(stcb->sctp_ep, stcb,
607 		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
608 
609 		*abort_flag = 1;
610 		return;
611 
612 	}
613 	if (nxt_todel == control->sinfo_ssn) {
614 		/* can be delivered right away? */
615 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
616 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
617 		}
618 		/* EY it won't be queued if it could be delivered directly */
619 		queue_needed = 0;
620 		asoc->size_on_all_streams -= control->length;
621 		sctp_ucount_decr(asoc->cnt_on_all_streams);
622 		strm->last_sequence_delivered++;
623 
624 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
625 		sctp_add_to_readq(stcb->sctp_ep, stcb,
626 		    control,
627 		    &stcb->sctp_socket->so_rcv, 1,
628 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
629 		control = TAILQ_FIRST(&strm->inqueue);
630 		while (control != NULL) {
631 			/* all delivered */
632 			nxt_todel = strm->last_sequence_delivered + 1;
633 			if (nxt_todel == control->sinfo_ssn) {
634 				at = TAILQ_NEXT(control, next);
635 				TAILQ_REMOVE(&strm->inqueue, control, next);
636 				asoc->size_on_all_streams -= control->length;
637 				sctp_ucount_decr(asoc->cnt_on_all_streams);
638 				strm->last_sequence_delivered++;
639 				/*
640 				 * We ignore the return of deliver_data here,
641 				 * since we can always hold the chunk on the
642 				 * d-queue, and we have a finite number that
643 				 * can be delivered from the strq.
644 				 */
645 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
646 					sctp_log_strm_del(control, NULL,
647 					    SCTP_STR_LOG_FROM_IMMED_DEL);
648 				}
649 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
650 				sctp_add_to_readq(stcb->sctp_ep, stcb,
651 				    control,
652 				    &stcb->sctp_socket->so_rcv, 1,
653 				    SCTP_READ_LOCK_NOT_HELD,
654 				    SCTP_SO_NOT_LOCKED);
655 				control = at;
656 				continue;
657 			}
658 			break;
659 		}
660 	}
661 	if (queue_needed) {
662 		/*
663 		 * Ok, we did not deliver this guy, find the correct place
664 		 * to put it on the queue.
665 		 */
666 		if ((compare_with_wrap(asoc->cumulative_tsn,
667 		    control->sinfo_tsn, MAX_TSN)) ||
668 		    (control->sinfo_tsn == asoc->cumulative_tsn)) {
669 			goto protocol_error;
670 		}
671 		if (TAILQ_EMPTY(&strm->inqueue)) {
672 			/* Empty queue */
673 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
674 				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
675 			}
676 			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
677 		} else {
678 			TAILQ_FOREACH(at, &strm->inqueue, next) {
679 				if (compare_with_wrap(at->sinfo_ssn,
680 				    control->sinfo_ssn, MAX_SEQ)) {
681 					/*
682 					 * one in queue is bigger than the
683 					 * new one, insert before this one
684 					 */
685 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
686 						sctp_log_strm_del(control, at,
687 						    SCTP_STR_LOG_FROM_INSERT_MD);
688 					}
689 					TAILQ_INSERT_BEFORE(at, control, next);
690 					break;
691 				} else if (at->sinfo_ssn == control->sinfo_ssn) {
692 					/*
693 					 * Gak, He sent me a duplicate str
694 					 * seq number
695 					 */
696 					/*
697 					 * foo bar, I guess I will just free
698 					 * this new guy, should we abort
699 					 * too? FIX ME MAYBE? Or it COULD be
700 					 * that the SSN's have wrapped.
701 					 * Maybe I should compare to TSN
702 					 * somehow... sigh for now just blow
703 					 * away the chunk!
704 					 */
705 
706 					if (control->data)
707 						sctp_m_freem(control->data);
708 					control->data = NULL;
709 					asoc->size_on_all_streams -= control->length;
710 					sctp_ucount_decr(asoc->cnt_on_all_streams);
711 					if (control->whoFrom)
712 						sctp_free_remote_addr(control->whoFrom);
713 					control->whoFrom = NULL;
714 					sctp_free_a_readq(stcb, control);
715 					return;
716 				} else {
717 					if (TAILQ_NEXT(at, next) == NULL) {
718 						/*
719 						 * We are at the end, insert
720 						 * it after this one
721 						 */
722 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
723 							sctp_log_strm_del(control, at,
724 							    SCTP_STR_LOG_FROM_INSERT_TL);
725 						}
726 						TAILQ_INSERT_AFTER(&strm->inqueue,
727 						    at, control, next);
728 						break;
729 					}
730 				}
731 			}
732 		}
733 	}
734 }
735 
736 /*
737  * Returns two things: the total size (via t_size) of the deliverable parts
738  * of the first fragmented message on the reassembly queue, and 1 if all of
739  * the message is ready or 0 if the message is still incomplete.
740  */
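/*
 * Example (hypothetical queue): fragments with TSNs 10 (FIRST), 11 (MIDDLE)
 * and 12 (LAST) yield a return of 1 with *t_size equal to the sum of the
 * three send_sizes; if TSN 12 were missing, the walk below would fall off
 * the sequential run and return 0.
 */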
741 static int
742 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
743 {
744 	struct sctp_tmit_chunk *chk;
745 	uint32_t tsn;
746 
747 	*t_size = 0;
748 	chk = TAILQ_FIRST(&asoc->reasmqueue);
749 	if (chk == NULL) {
750 		/* nothing on the queue */
751 		return (0);
752 	}
753 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
754 		/* Not a first on the queue */
755 		return (0);
756 	}
757 	tsn = chk->rec.data.TSN_seq;
758 	while (chk) {
759 		if (tsn != chk->rec.data.TSN_seq) {
760 			return (0);
761 		}
762 		*t_size += chk->send_size;
763 		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
764 			return (1);
765 		}
766 		tsn++;
767 		chk = TAILQ_NEXT(chk, sctp_next);
768 	}
769 	return (0);
770 }
771 
772 static void
773 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
774 {
775 	struct sctp_tmit_chunk *chk;
776 	uint16_t nxt_todel;
777 	uint32_t tsize, pd_point;
778 
779 doit_again:
780 	chk = TAILQ_FIRST(&asoc->reasmqueue);
781 	if (chk == NULL) {
782 		/* Huh? */
783 		asoc->size_on_reasm_queue = 0;
784 		asoc->cnt_on_reasm_queue = 0;
785 		return;
786 	}
787 	if (asoc->fragmented_delivery_inprogress == 0) {
788 		nxt_todel =
789 		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
790 		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
791 		    (nxt_todel == chk->rec.data.stream_seq ||
792 		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
793 			/*
794 			 * Yep the first one is here and it's ok to deliver
795 			 * but should we?
796 			 */
797 			if (stcb->sctp_socket) {
798 				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
799 				    stcb->sctp_ep->partial_delivery_point);
800 			} else {
801 				pd_point = stcb->sctp_ep->partial_delivery_point;
802 			}
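			/*
			 * pd_point is the partial delivery threshold: the
			 * smaller of the socket's receive buffer limit and
			 * the endpoint's configured partial_delivery_point,
			 * so we never wait on more data than the socket
			 * could hold anyway.
			 */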
803 			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
804 
805 				/*
806 				 * Yes, we set up to start reception by
807 				 * backing down the TSN just in case we
808 				 * can't deliver.
809 				 */
810 				asoc->fragmented_delivery_inprogress = 1;
811 				asoc->tsn_last_delivered =
812 				    chk->rec.data.TSN_seq - 1;
813 				asoc->str_of_pdapi =
814 				    chk->rec.data.stream_number;
815 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
816 				asoc->pdapi_ppid = chk->rec.data.payloadtype;
817 				asoc->fragment_flags = chk->rec.data.rcv_flags;
818 				sctp_service_reassembly(stcb, asoc);
819 			}
820 		}
821 	} else {
822 		/*
823 		 * Service re-assembly will deliver stream data queued at
824 		 * the end of fragmented delivery, but it won't know to go
825 		 * back and call itself again... we do that here with the
826 		 * goto doit_again.
827 		 */
828 		sctp_service_reassembly(stcb, asoc);
829 		if (asoc->fragmented_delivery_inprogress == 0) {
830 			/*
831 			 * finished our Fragmented delivery, could be more
832 			 * waiting?
833 			 */
834 			goto doit_again;
835 		}
836 	}
837 }
838 
839 /*
840  * Dump onto the re-assembly queue, in its proper place. After dumping on the
841  * queue, see if anything can be delivered. If so, pull it off (or as much as
842  * we can). If we run out of space then we must dump what we can and set the
843  * appropriate flag to say we queued what we could.
844  */
845 static void
846 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
847     struct sctp_tmit_chunk *chk, int *abort_flag)
848 {
849 	struct mbuf *oper;
850 	uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
851 	u_char last_flags;
852 	struct sctp_tmit_chunk *at, *prev, *next;
853 
854 	prev = next = NULL;
855 	cum_ackp1 = asoc->tsn_last_delivered + 1;
856 	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
857 		/* This is the first one on the queue */
858 		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
859 		/*
860 		 * we do not check for delivery of anything when only one
861 		 * fragment is here
862 		 */
863 		asoc->size_on_reasm_queue = chk->send_size;
864 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
865 		if (chk->rec.data.TSN_seq == cum_ackp1) {
866 			if (asoc->fragmented_delivery_inprogress == 0 &&
867 			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
868 			    SCTP_DATA_FIRST_FRAG) {
869 				/*
870 				 * An empty queue, no delivery inprogress,
871 				 * we hit the next one and it does NOT have
872 				 * a FIRST fragment mark.
873 				 */
874 				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's not first, no fragmented delivery in progress\n");
875 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
876 				    0, M_DONTWAIT, 1, MT_DATA);
877 
878 				if (oper) {
879 					struct sctp_paramhdr *ph;
880 					uint32_t *ippp;
881 
882 					SCTP_BUF_LEN(oper) =
883 					    sizeof(struct sctp_paramhdr) +
884 					    (sizeof(uint32_t) * 3);
885 					ph = mtod(oper, struct sctp_paramhdr *);
886 					ph->param_type =
887 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
888 					ph->param_length = htons(SCTP_BUF_LEN(oper));
889 					ippp = (uint32_t *) (ph + 1);
890 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
891 					ippp++;
892 					*ippp = chk->rec.data.TSN_seq;
893 					ippp++;
894 					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
895 
896 				}
897 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
898 				sctp_abort_an_association(stcb->sctp_ep, stcb,
899 				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
900 				*abort_flag = 1;
901 			} else if (asoc->fragmented_delivery_inprogress &&
902 			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
903 				/*
904 				 * We are doing a partial delivery and the
905 				 * NEXT chunk MUST be either the LAST or
906 				 * MIDDLE fragment NOT a FIRST
907 				 */
908 				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery is in progress\n");
909 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
910 				    0, M_DONTWAIT, 1, MT_DATA);
911 				if (oper) {
912 					struct sctp_paramhdr *ph;
913 					uint32_t *ippp;
914 
915 					SCTP_BUF_LEN(oper) =
916 					    sizeof(struct sctp_paramhdr) +
917 					    (3 * sizeof(uint32_t));
918 					ph = mtod(oper, struct sctp_paramhdr *);
919 					ph->param_type =
920 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
921 					ph->param_length = htons(SCTP_BUF_LEN(oper));
922 					ippp = (uint32_t *) (ph + 1);
923 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
924 					ippp++;
925 					*ippp = chk->rec.data.TSN_seq;
926 					ippp++;
927 					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
928 				}
929 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
930 				sctp_abort_an_association(stcb->sctp_ep, stcb,
931 				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
932 				*abort_flag = 1;
933 			} else if (asoc->fragmented_delivery_inprogress) {
934 				/*
935 				 * Here we are ok with a MIDDLE or LAST
936 				 * piece
937 				 */
938 				if (chk->rec.data.stream_number !=
939 				    asoc->str_of_pdapi) {
940 					/* Got to be the right STR No */
941 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not the same stream number %d vs %d\n",
942 					    chk->rec.data.stream_number,
943 					    asoc->str_of_pdapi);
944 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
945 					    0, M_DONTWAIT, 1, MT_DATA);
946 					if (oper) {
947 						struct sctp_paramhdr *ph;
948 						uint32_t *ippp;
949 
950 						SCTP_BUF_LEN(oper) =
951 						    sizeof(struct sctp_paramhdr) +
952 						    (sizeof(uint32_t) * 3);
953 						ph = mtod(oper,
954 						    struct sctp_paramhdr *);
955 						ph->param_type =
956 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
957 						ph->param_length =
958 						    htons(SCTP_BUF_LEN(oper));
959 						ippp = (uint32_t *) (ph + 1);
960 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
961 						ippp++;
962 						*ippp = chk->rec.data.TSN_seq;
963 						ippp++;
964 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
965 					}
966 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
967 					sctp_abort_an_association(stcb->sctp_ep,
968 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
969 					*abort_flag = 1;
970 				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
971 					    SCTP_DATA_UNORDERED &&
972 				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
973 					/* Got to be the right STR Seq */
974 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not the same stream seq %d vs %d\n",
975 					    chk->rec.data.stream_seq,
976 					    asoc->ssn_of_pdapi);
977 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
978 					    0, M_DONTWAIT, 1, MT_DATA);
979 					if (oper) {
980 						struct sctp_paramhdr *ph;
981 						uint32_t *ippp;
982 
983 						SCTP_BUF_LEN(oper) =
984 						    sizeof(struct sctp_paramhdr) +
985 						    (3 * sizeof(uint32_t));
986 						ph = mtod(oper,
987 						    struct sctp_paramhdr *);
988 						ph->param_type =
989 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
990 						ph->param_length =
991 						    htons(SCTP_BUF_LEN(oper));
992 						ippp = (uint32_t *) (ph + 1);
993 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
994 						ippp++;
995 						*ippp = chk->rec.data.TSN_seq;
996 						ippp++;
997 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
998 
999 					}
1000 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
1001 					sctp_abort_an_association(stcb->sctp_ep,
1002 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1003 					*abort_flag = 1;
1004 				}
1005 			}
1006 		}
1007 		return;
1008 	}
1009 	/* Find its place */
1010 	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1011 		if (compare_with_wrap(at->rec.data.TSN_seq,
1012 		    chk->rec.data.TSN_seq, MAX_TSN)) {
1013 			/*
1014 			 * one in queue is bigger than the new one, insert
1015 			 * before this one
1016 			 */
1017 			/* account for its space */
1018 			asoc->size_on_reasm_queue += chk->send_size;
1019 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1020 			next = at;
1021 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1022 			break;
1023 		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
1024 		/* Gak, he sent me a duplicate stream seq number */
1025 			/*
1026 			 * foo bar, I guess I will just free this new guy,
1027 			 * should we abort too? FIX ME MAYBE? Or it COULD be
1028 		 * that the SSNs have wrapped. Maybe I should
1029 			 * compare to TSN somehow... sigh for now just blow
1030 			 * away the chunk!
1031 			 */
1032 			if (chk->data) {
1033 				sctp_m_freem(chk->data);
1034 				chk->data = NULL;
1035 			}
1036 			sctp_free_a_chunk(stcb, chk);
1037 			return;
1038 		} else {
1039 			last_flags = at->rec.data.rcv_flags;
1040 			last_tsn = at->rec.data.TSN_seq;
1041 			prev = at;
1042 			if (TAILQ_NEXT(at, sctp_next) == NULL) {
1043 				/*
1044 				 * We are at the end, insert it after this
1045 				 * one
1046 				 */
1047 				/* check it first */
1048 				asoc->size_on_reasm_queue += chk->send_size;
1049 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1050 				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
1051 				break;
1052 			}
1053 		}
1054 	}
1055 	/* Now the audits */
1056 	if (prev) {
1057 		prev_tsn = chk->rec.data.TSN_seq - 1;
1058 		if (prev_tsn == prev->rec.data.TSN_seq) {
1059 			/*
1060 			 * Ok the one I am dropping onto the end is the
1061 			 * NEXT. A bit of validation here.
1062 			 */
1063 			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1064 			    SCTP_DATA_FIRST_FRAG ||
1065 			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1066 			    SCTP_DATA_MIDDLE_FRAG) {
1067 				/*
1068 				 * Insert chk MUST be a MIDDLE or LAST
1069 				 * fragment
1070 				 */
1071 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1072 				    SCTP_DATA_FIRST_FRAG) {
1073 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
1074 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
1075 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1076 					    0, M_DONTWAIT, 1, MT_DATA);
1077 					if (oper) {
1078 						struct sctp_paramhdr *ph;
1079 						uint32_t *ippp;
1080 
1081 						SCTP_BUF_LEN(oper) =
1082 						    sizeof(struct sctp_paramhdr) +
1083 						    (3 * sizeof(uint32_t));
1084 						ph = mtod(oper,
1085 						    struct sctp_paramhdr *);
1086 						ph->param_type =
1087 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1088 						ph->param_length =
1089 						    htons(SCTP_BUF_LEN(oper));
1090 						ippp = (uint32_t *) (ph + 1);
1091 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1092 						ippp++;
1093 						*ippp = chk->rec.data.TSN_seq;
1094 						ippp++;
1095 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1096 
1097 					}
1098 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1099 					sctp_abort_an_association(stcb->sctp_ep,
1100 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1101 					*abort_flag = 1;
1102 					return;
1103 				}
1104 				if (chk->rec.data.stream_number !=
1105 				    prev->rec.data.stream_number) {
1106 					/*
1107 					 * Huh, need the correct STR here,
1108 					 * they must be the same.
1109 					 */
1110 					SCTP_PRINTF("Prev check - Gak, Evil plot, str:%d not the same as at:%d\n",
1111 					    chk->rec.data.stream_number,
1112 					    prev->rec.data.stream_number);
1113 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1114 					    0, M_DONTWAIT, 1, MT_DATA);
1115 					if (oper) {
1116 						struct sctp_paramhdr *ph;
1117 						uint32_t *ippp;
1118 
1119 						SCTP_BUF_LEN(oper) =
1120 						    sizeof(struct sctp_paramhdr) +
1121 						    (3 * sizeof(uint32_t));
1122 						ph = mtod(oper,
1123 						    struct sctp_paramhdr *);
1124 						ph->param_type =
1125 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1126 						ph->param_length =
1127 						    htons(SCTP_BUF_LEN(oper));
1128 						ippp = (uint32_t *) (ph + 1);
1129 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1130 						ippp++;
1131 						*ippp = chk->rec.data.TSN_seq;
1132 						ippp++;
1133 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1134 					}
1135 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1136 					sctp_abort_an_association(stcb->sctp_ep,
1137 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1138 
1139 					*abort_flag = 1;
1140 					return;
1141 				}
1142 				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1143 				    chk->rec.data.stream_seq !=
1144 				    prev->rec.data.stream_seq) {
1145 					/*
1146 					 * Huh, need the correct STR here,
1147 					 * they must be the same.
1148 					 */
1149 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1150 					    chk->rec.data.stream_seq,
1151 					    prev->rec.data.stream_seq);
1152 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1153 					    0, M_DONTWAIT, 1, MT_DATA);
1154 					if (oper) {
1155 						struct sctp_paramhdr *ph;
1156 						uint32_t *ippp;
1157 
1158 						SCTP_BUF_LEN(oper) =
1159 						    sizeof(struct sctp_paramhdr) +
1160 						    (3 * sizeof(uint32_t));
1161 						ph = mtod(oper,
1162 						    struct sctp_paramhdr *);
1163 						ph->param_type =
1164 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1165 						ph->param_length =
1166 						    htons(SCTP_BUF_LEN(oper));
1167 						ippp = (uint32_t *) (ph + 1);
1168 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1169 						ippp++;
1170 						*ippp = chk->rec.data.TSN_seq;
1171 						ippp++;
1172 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1173 					}
1174 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1175 					sctp_abort_an_association(stcb->sctp_ep,
1176 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1177 
1178 					*abort_flag = 1;
1179 					return;
1180 				}
1181 			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1182 			    SCTP_DATA_LAST_FRAG) {
1183 				/* Insert chk MUST be a FIRST */
1184 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1185 				    SCTP_DATA_FIRST_FRAG) {
1186 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1187 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1188 					    0, M_DONTWAIT, 1, MT_DATA);
1189 					if (oper) {
1190 						struct sctp_paramhdr *ph;
1191 						uint32_t *ippp;
1192 
1193 						SCTP_BUF_LEN(oper) =
1194 						    sizeof(struct sctp_paramhdr) +
1195 						    (3 * sizeof(uint32_t));
1196 						ph = mtod(oper,
1197 						    struct sctp_paramhdr *);
1198 						ph->param_type =
1199 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1200 						ph->param_length =
1201 						    htons(SCTP_BUF_LEN(oper));
1202 						ippp = (uint32_t *) (ph + 1);
1203 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1204 						ippp++;
1205 						*ippp = chk->rec.data.TSN_seq;
1206 						ippp++;
1207 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1208 
1209 					}
1210 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1211 					sctp_abort_an_association(stcb->sctp_ep,
1212 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1213 
1214 					*abort_flag = 1;
1215 					return;
1216 				}
1217 			}
1218 		}
1219 	}
1220 	if (next) {
1221 		post_tsn = chk->rec.data.TSN_seq + 1;
1222 		if (post_tsn == next->rec.data.TSN_seq) {
1223 			/*
1224 			 * Ok the one I am inserting ahead of is my NEXT
1225 			 * one. A bit of validation here.
1226 			 */
1227 			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1228 				/* Insert chk MUST be a last fragment */
1229 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1230 				    != SCTP_DATA_LAST_FRAG) {
1231 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1232 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's not a last!\n");
1233 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1234 					    0, M_DONTWAIT, 1, MT_DATA);
1235 					if (oper) {
1236 						struct sctp_paramhdr *ph;
1237 						uint32_t *ippp;
1238 
1239 						SCTP_BUF_LEN(oper) =
1240 						    sizeof(struct sctp_paramhdr) +
1241 						    (3 * sizeof(uint32_t));
1242 						ph = mtod(oper,
1243 						    struct sctp_paramhdr *);
1244 						ph->param_type =
1245 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1246 						ph->param_length =
1247 						    htons(SCTP_BUF_LEN(oper));
1248 						ippp = (uint32_t *) (ph + 1);
1249 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1250 						ippp++;
1251 						*ippp = chk->rec.data.TSN_seq;
1252 						ippp++;
1253 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1254 					}
1255 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1256 					sctp_abort_an_association(stcb->sctp_ep,
1257 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1258 
1259 					*abort_flag = 1;
1260 					return;
1261 				}
1262 			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1263 				    SCTP_DATA_MIDDLE_FRAG ||
1264 				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1265 			    SCTP_DATA_LAST_FRAG) {
1266 				/*
1267 				 * Insert chk CAN be MIDDLE or FIRST NOT
1268 				 * LAST
1269 				 */
1270 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1271 				    SCTP_DATA_LAST_FRAG) {
1272 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1273 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1274 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1275 					    0, M_DONTWAIT, 1, MT_DATA);
1276 					if (oper) {
1277 						struct sctp_paramhdr *ph;
1278 						uint32_t *ippp;
1279 
1280 						SCTP_BUF_LEN(oper) =
1281 						    sizeof(struct sctp_paramhdr) +
1282 						    (3 * sizeof(uint32_t));
1283 						ph = mtod(oper,
1284 						    struct sctp_paramhdr *);
1285 						ph->param_type =
1286 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1287 						ph->param_length =
1288 						    htons(SCTP_BUF_LEN(oper));
1289 						ippp = (uint32_t *) (ph + 1);
1290 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1291 						ippp++;
1292 						*ippp = chk->rec.data.TSN_seq;
1293 						ippp++;
1294 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1295 
1296 					}
1297 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1298 					sctp_abort_an_association(stcb->sctp_ep,
1299 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1300 
1301 					*abort_flag = 1;
1302 					return;
1303 				}
1304 				if (chk->rec.data.stream_number !=
1305 				    next->rec.data.stream_number) {
1306 					/*
1307 					 * Huh, need the correct STR here,
1308 					 * they must be the same.
1309 					 */
1310 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, str:%d not the same as at:%d\n",
1311 					    chk->rec.data.stream_number,
1312 					    next->rec.data.stream_number);
1313 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1314 					    0, M_DONTWAIT, 1, MT_DATA);
1315 					if (oper) {
1316 						struct sctp_paramhdr *ph;
1317 						uint32_t *ippp;
1318 
1319 						SCTP_BUF_LEN(oper) =
1320 						    sizeof(struct sctp_paramhdr) +
1321 						    (3 * sizeof(uint32_t));
1322 						ph = mtod(oper,
1323 						    struct sctp_paramhdr *);
1324 						ph->param_type =
1325 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1326 						ph->param_length =
1327 						    htons(SCTP_BUF_LEN(oper));
1328 						ippp = (uint32_t *) (ph + 1);
1329 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1330 						ippp++;
1331 						*ippp = chk->rec.data.TSN_seq;
1332 						ippp++;
1333 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1334 
1335 					}
1336 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1337 					sctp_abort_an_association(stcb->sctp_ep,
1338 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1339 
1340 					*abort_flag = 1;
1341 					return;
1342 				}
1343 				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1344 				    chk->rec.data.stream_seq !=
1345 				    next->rec.data.stream_seq) {
1346 					/*
1347 					 * Huh, need the correct STR here,
1348 					 * they must be the same.
1349 					 */
1350 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1351 					    chk->rec.data.stream_seq,
1352 					    next->rec.data.stream_seq);
1353 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1354 					    0, M_DONTWAIT, 1, MT_DATA);
1355 					if (oper) {
1356 						struct sctp_paramhdr *ph;
1357 						uint32_t *ippp;
1358 
1359 						SCTP_BUF_LEN(oper) =
1360 						    sizeof(struct sctp_paramhdr) +
1361 						    (3 * sizeof(uint32_t));
1362 						ph = mtod(oper,
1363 						    struct sctp_paramhdr *);
1364 						ph->param_type =
1365 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1366 						ph->param_length =
1367 						    htons(SCTP_BUF_LEN(oper));
1368 						ippp = (uint32_t *) (ph + 1);
1369 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1370 						ippp++;
1371 						*ippp = chk->rec.data.TSN_seq;
1372 						ippp++;
1373 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1374 					}
1375 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1376 					sctp_abort_an_association(stcb->sctp_ep,
1377 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1378 
1379 					*abort_flag = 1;
1380 					return;
1381 				}
1382 			}
1383 		}
1384 	}
1385 	/* Do we need to do some delivery? check */
1386 	sctp_deliver_reasm_check(stcb, asoc);
1387 }
1388 
1389 /*
1390  * This is an unfortunate routine. It checks to make sure an evil guy is not
1391  * stuffing us full of bad packet fragments. A broken peer could also do this,
1392  * but that is doubtful. It is too bad I must worry about evil crackers, sigh
1393  * :< more cycles.
1394  */
1395 static int
1396 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1397     uint32_t TSN_seq)
1398 {
1399 	struct sctp_tmit_chunk *at;
1400 	uint32_t tsn_est;
1401 
1402 	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1403 		if (compare_with_wrap(TSN_seq,
1404 		    at->rec.data.TSN_seq, MAX_TSN)) {
1405 			/* is it one bigger? */
1406 			tsn_est = at->rec.data.TSN_seq + 1;
1407 			if (tsn_est == TSN_seq) {
1408 				/* yep. It better be a last then */
1409 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1410 				    SCTP_DATA_LAST_FRAG) {
1411 					/*
1412 					 * Ok this guy belongs next to a guy
1413 					 * that is NOT last, it should be a
1414 					 * middle/last, not a complete
1415 					 * chunk.
1416 					 */
1417 					return (1);
1418 				} else {
1419 					/*
1420 					 * This guy is ok since it's a LAST
1421 					 * and the new chunk is a fully
1422 					 * self-contained one.
1423 					 */
1424 					return (0);
1425 				}
1426 			}
1427 		} else if (TSN_seq == at->rec.data.TSN_seq) {
1428 			/* Software error since I have a dup? */
1429 			return (1);
1430 		} else {
1431 			/*
1432 			 * Ok, 'at' is larger than new chunk but does it
1433 			 * need to be right before it?
1434 			 */
1435 			tsn_est = TSN_seq + 1;
1436 			if (tsn_est == at->rec.data.TSN_seq) {
1437 				/* Yep, It better be a first */
1438 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1439 				    SCTP_DATA_FIRST_FRAG) {
1440 					return (1);
1441 				} else {
1442 					return (0);
1443 				}
1444 			}
1445 		}
1446 	}
1447 	return (0);
1448 }
1449 
1450 
1451 static int
1452 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1453     struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1454     struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1455     int *break_flag, int last_chunk)
1456 {
1457 	/* Process a data chunk */
1459 	struct sctp_tmit_chunk *chk;
1460 	uint32_t tsn, gap;
1461 	struct mbuf *dmbuf;
1462 	int indx, the_len;
1463 	int need_reasm_check = 0;
1464 	uint16_t strmno, strmseq;
1465 	struct mbuf *oper;
1466 	struct sctp_queued_to_read *control;
1467 	int ordered;
1468 	uint32_t protocol_id;
1469 	uint8_t chunk_flags;
1470 	struct sctp_stream_reset_list *liste;
1471 
1472 	chk = NULL;
1473 	tsn = ntohl(ch->dp.tsn);
1474 	chunk_flags = ch->ch.chunk_flags;
1475 	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1476 		asoc->send_sack = 1;
1477 	}
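	/*
	 * The SACK-IMMEDIATELY (I) bit asks us to SACK right away rather
	 * than waiting out the delayed-SACK timer; setting send_sack above
	 * honors that on our next chance to send.
	 */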
1478 	protocol_id = ch->dp.protocol_id;
1479 	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1480 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1481 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1482 	}
1483 	if (stcb == NULL) {
1484 		return (0);
1485 	}
1486 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1487 	if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
1488 	    asoc->cumulative_tsn == tsn) {
1489 		/* It is a duplicate */
1490 		SCTP_STAT_INCR(sctps_recvdupdata);
1491 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1492 			/* Record a dup for the next outbound sack */
1493 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1494 			asoc->numduptsns++;
1495 		}
1496 		asoc->send_sack = 1;
1497 		return (0);
1498 	}
1499 	/* Calculate the number of TSNs between the base and this TSN */
1500 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
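	/*
	 * Illustrative: with mapping_array_base_tsn = 0xfffffffe and
	 * tsn = 1, the modulo-2^32 subtraction gives gap = 3, so the wrap
	 * case still indexes the map correctly; the check below then
	 * bounds gap by the SCTP_MAPPING_ARRAY * 8 TSNs the map can track.
	 */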
1501 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1502 		/* Can't hold the bit in the mapping at max array, toss it */
1503 		return (0);
1504 	}
1505 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1506 		SCTP_TCB_LOCK_ASSERT(stcb);
1507 		if (sctp_expand_mapping_array(asoc, gap)) {
1508 			/* Can't expand, drop it */
1509 			return (0);
1510 		}
1511 	}
1512 	if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
1513 		*high_tsn = tsn;
1514 	}
1515 	/* See if we have received this one already */
1516 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1517 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1518 		SCTP_STAT_INCR(sctps_recvdupdata);
1519 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1520 			/* Record a dup for the next outbound sack */
1521 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1522 			asoc->numduptsns++;
1523 		}
1524 		asoc->send_sack = 1;
1525 		return (0);
1526 	}
1527 	/*
1528 	 * Check to see about the GONE flag, duplicates would cause a sack
1529 	 * to be sent up above
1530 	 */
1531 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1532 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1533 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1534 	    ) {
1535 		/*
1536 		 * wait a minute, this guy is gone, there is no longer a
1537 		 * receiver. Send peer an ABORT!
1538 		 */
1539 		struct mbuf *op_err;
1540 
1541 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1542 		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
1543 		*abort_flag = 1;
1544 		return (0);
1545 	}
1546 	/*
1547 	 * Now before going further we see if there is room. If NOT then we
1548 	 * MAY let one through only IF this TSN is the one we are waiting
1549 	 * for on a partial delivery API.
1550 	 */
1551 
1552 	/* now do the tests */
1553 	if (((asoc->cnt_on_all_streams +
1554 	    asoc->cnt_on_reasm_queue +
1555 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1556 	    (((int)asoc->my_rwnd) <= 0)) {
1557 		/*
1558 		 * When we have NO room in the rwnd we check to make sure
1559 		 * the reader is doing its job...
1560 		 */
1561 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1562 			/* some to read, wake-up */
1563 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1564 			struct socket *so;
1565 
1566 			so = SCTP_INP_SO(stcb->sctp_ep);
1567 			atomic_add_int(&stcb->asoc.refcnt, 1);
1568 			SCTP_TCB_UNLOCK(stcb);
1569 			SCTP_SOCKET_LOCK(so, 1);
1570 			SCTP_TCB_LOCK(stcb);
1571 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1572 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1573 				/* assoc was freed while we were unlocked */
1574 				SCTP_SOCKET_UNLOCK(so, 1);
1575 				return (0);
1576 			}
1577 #endif
1578 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1579 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1580 			SCTP_SOCKET_UNLOCK(so, 1);
1581 #endif
1582 		}
1583 		/* now is it in the mapping array of what we have accepted? */
1584 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN) &&
1585 		    compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1586 			/* Nope, not in the valid range; dump it */
1587 			sctp_set_rwnd(stcb, asoc);
1588 			if ((asoc->cnt_on_all_streams +
1589 			    asoc->cnt_on_reasm_queue +
1590 			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1591 				SCTP_STAT_INCR(sctps_datadropchklmt);
1592 			} else {
1593 				SCTP_STAT_INCR(sctps_datadroprwnd);
1594 			}
1595 			indx = *break_flag;
1596 			*break_flag = 1;
1597 			return (0);
1598 		}
1599 	}
1600 	strmno = ntohs(ch->dp.stream_id);
1601 	if (strmno >= asoc->streamincnt) {
1602 		struct sctp_paramhdr *phdr;
1603 		struct mbuf *mb;
1604 
1605 		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1606 		    0, M_DONTWAIT, 1, MT_DATA);
1607 		if (mb != NULL) {
1608 			/* add some space up front so prepend will work well */
1609 			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1610 			phdr = mtod(mb, struct sctp_paramhdr *);
1611 			/*
1612 			 * Error causes are just params, and this one has
1613 			 * two back-to-back phdrs: one with the error type
1614 			 * and size, the other with the stream id and a rsvd field
1615 			 */
1616 			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1617 			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1618 			phdr->param_length =
1619 			    htons(sizeof(struct sctp_paramhdr) * 2);
1620 			phdr++;
1621 			/* We insert the stream in the type field */
1622 			phdr->param_type = ch->dp.stream_id;
1623 			/* And set the length to 0 for the rsvd field */
1624 			phdr->param_length = 0;
1625 			sctp_queue_op_err(stcb, mb);
1626 		}
1627 		SCTP_STAT_INCR(sctps_badsid);
1628 		SCTP_TCB_LOCK_ASSERT(stcb);
1629 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1630 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1631 			asoc->highest_tsn_inside_nr_map = tsn;
1632 		}
1633 		if (tsn == (asoc->cumulative_tsn + 1)) {
1634 			/* Update cum-ack */
1635 			asoc->cumulative_tsn = tsn;
1636 		}
1637 		return (0);
1638 	}
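	/*
	 * For reference, the Invalid Stream Identifier cause queued above
	 * lays out as two back-to-back parameter headers, per the SCTP
	 * specification (all fields in network byte order):
	 *
	 *   +-------------------+--------------------+
	 *   | cause code = 1    | cause length = 8   |
	 *   +-------------------+--------------------+
	 *   | stream identifier | reserved = 0       |
	 *   +-------------------+--------------------+
	 */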
1639 	/*
1640 	 * Before we continue, let's validate that we are not being fooled
1641 	 * by an evil attacker. We can only have 4k chunks based on our TSN
1642 	 * spread allowed by the mapping array (512 * 8 bits), so there is
1643 	 * no way our stream sequence numbers could have wrapped. Of course
1644 	 * we only validate the FIRST fragment, so the bit must be set.
1645 	 */
1646 	strmseq = ntohs(ch->dp.stream_sequence);
1647 #ifdef SCTP_ASOCLOG_OF_TSNS
1648 	SCTP_TCB_LOCK_ASSERT(stcb);
1649 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1650 		asoc->tsn_in_at = 0;
1651 		asoc->tsn_in_wrapped = 1;
1652 	}
1653 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1654 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1655 	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1656 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1657 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1658 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1659 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1660 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1661 	asoc->tsn_in_at++;
1662 #endif
1663 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1664 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1665 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1666 	    (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
1667 	    strmseq, MAX_SEQ) ||
1668 	    asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
1669 		/* The incoming sseq is behind where we last delivered? */
1670 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1671 		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1672 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1673 		    0, M_DONTWAIT, 1, MT_DATA);
1674 		if (oper) {
1675 			struct sctp_paramhdr *ph;
1676 			uint32_t *ippp;
1677 
1678 			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1679 			    (3 * sizeof(uint32_t));
1680 			ph = mtod(oper, struct sctp_paramhdr *);
1681 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1682 			ph->param_length = htons(SCTP_BUF_LEN(oper));
1683 			ippp = (uint32_t *) (ph + 1);
1684 			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1685 			ippp++;
1686 			*ippp = tsn;
1687 			ippp++;
1688 			*ippp = ((strmno << 16) | strmseq);
1689 
1690 		}
1691 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1692 		sctp_abort_an_association(stcb->sctp_ep, stcb,
1693 		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1694 		*abort_flag = 1;
1695 		return (0);
1696 	}
1697 	/************************************
1698 	 * From here down we may find ch-> invalid
1699 	 * so it's a good idea NOT to use it.
1700 	 *************************************/
1701 
1702 	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1703 	if (last_chunk == 0) {
1704 		dmbuf = SCTP_M_COPYM(*m,
1705 		    (offset + sizeof(struct sctp_data_chunk)),
1706 		    the_len, M_DONTWAIT);
1707 #ifdef SCTP_MBUF_LOGGING
1708 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1709 			struct mbuf *mat;
1710 
1711 			mat = dmbuf;
1712 			while (mat) {
1713 				if (SCTP_BUF_IS_EXTENDED(mat)) {
1714 					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1715 				}
1716 				mat = SCTP_BUF_NEXT(mat);
1717 			}
1718 		}
1719 #endif
1720 	} else {
1721 		/* We can steal the last chunk */
1722 		int l_len;
1723 
1724 		dmbuf = *m;
1725 		/* lop off the top part */
1726 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1727 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1728 			l_len = SCTP_BUF_LEN(dmbuf);
1729 		} else {
1730 			/*
1731 			 * need to count up the size; hopefully we do not
1732 			 * hit this too often :-0
1733 			 */
1734 			struct mbuf *lat;
1735 
1736 			l_len = 0;
1737 			lat = dmbuf;
1738 			while (lat) {
1739 				l_len += SCTP_BUF_LEN(lat);
1740 				lat = SCTP_BUF_NEXT(lat);
1741 			}
1742 		}
1743 		if (l_len > the_len) {
1744 			/* Trim the extra bytes off the end too */
1745 			m_adj(dmbuf, -(l_len - the_len));
1746 		}
1747 	}
1748 	if (dmbuf == NULL) {
1749 		SCTP_STAT_INCR(sctps_nomem);
1750 		return (0);
1751 	}
1752 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1753 	    asoc->fragmented_delivery_inprogress == 0 &&
1754 	    TAILQ_EMPTY(&asoc->resetHead) &&
1755 	    ((ordered == 0) ||
1756 	    ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1757 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1758 		/* Candidate for express delivery */
1759 		/*
1760 		 * It's not fragmented, no PD-API is up, nothing is in the
1761 		 * delivery queue, it's un-ordered OR ordered and the next to
1762 		 * deliver AND nothing else is stuck on the stream queue,
1763 		 * and there is room for it in the socket buffer. Let's just
1764 		 * stuff it up the buffer....
1765 		 */
1766 
1767 		/* It would be nice to avoid this copy if we could :< */
1768 		sctp_alloc_a_readq(stcb, control);
1769 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1770 		    protocol_id,
1771 		    stcb->asoc.context,
1772 		    strmno, strmseq,
1773 		    chunk_flags,
1774 		    dmbuf);
1775 		if (control == NULL) {
1776 			goto failed_express_del;
1777 		}
1778 		sctp_add_to_readq(stcb->sctp_ep, stcb,
1779 		    control, &stcb->sctp_socket->so_rcv,
1780 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1781 
1782 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1783 			/* for ordered, bump what we delivered */
1784 			asoc->strmin[strmno].last_sequence_delivered++;
1785 		}
1786 		SCTP_STAT_INCR(sctps_recvexpress);
1787 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1788 			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1789 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1790 		}
1791 		control = NULL;
1792 
1793 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1794 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1795 			asoc->highest_tsn_inside_nr_map = tsn;
1796 		}
1797 		goto finish_express_del;
1798 	}
1799 failed_express_del:
1800 	/* If we reach here this is a new chunk */
1801 	chk = NULL;
1802 	control = NULL;
1803 	/* Express for fragmented delivery? */
1804 	if ((asoc->fragmented_delivery_inprogress) &&
1805 	    (stcb->asoc.control_pdapi) &&
1806 	    (asoc->str_of_pdapi == strmno) &&
1807 	    (asoc->ssn_of_pdapi == strmseq)
1808 	    ) {
1809 		control = stcb->asoc.control_pdapi;
1810 		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1811 			/* Can't be another FIRST fragment */
1812 			goto failed_pdapi_express_del;
1813 		}
1814 		if (tsn == (control->sinfo_tsn + 1)) {
1815 			/* Yep, we can add it on */
1816 			int end = 0;
1817 			uint32_t cumack;
1818 
1819 			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1820 				end = 1;
1821 			}
1822 			cumack = asoc->cumulative_tsn;
1823 			if ((cumack + 1) == tsn)
1824 				cumack = tsn;
1825 
1826 			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1827 			    tsn,
1828 			    &stcb->sctp_socket->so_rcv)) {
1829 				SCTP_PRINTF("Append fails end:%d\n", end);
1830 				goto failed_pdapi_express_del;
1831 			}
1832 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1833 			if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1834 				asoc->highest_tsn_inside_nr_map = tsn;
1835 			}
1836 			SCTP_STAT_INCR(sctps_recvexpressm);
1837 			control->sinfo_tsn = tsn;
1838 			asoc->tsn_last_delivered = tsn;
1839 			asoc->fragment_flags = chunk_flags;
1840 			asoc->tsn_of_pdapi_last_delivered = tsn;
1841 			asoc->last_flags_delivered = chunk_flags;
1842 			asoc->last_strm_seq_delivered = strmseq;
1843 			asoc->last_strm_no_delivered = strmno;
1844 			if (end) {
1845 				/* clean up the flags and such */
1846 				asoc->fragmented_delivery_inprogress = 0;
1847 				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1848 					asoc->strmin[strmno].last_sequence_delivered++;
1849 				}
1850 				stcb->asoc.control_pdapi = NULL;
1851 				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1852 					/*
1853 					 * There could be another message
1854 					 * ready
1855 					 */
1856 					need_reasm_check = 1;
1857 				}
1858 			}
1859 			control = NULL;
1860 			goto finish_express_del;
1861 		}
1862 	}
1863 failed_pdapi_express_del:
1864 	control = NULL;
1865 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1866 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1867 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
1868 			asoc->highest_tsn_inside_nr_map = tsn;
1869 		}
1870 	} else {
1871 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1872 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1873 			asoc->highest_tsn_inside_map = tsn;
1874 		}
1875 	}
1876 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1877 		sctp_alloc_a_chunk(stcb, chk);
1878 		if (chk == NULL) {
1879 			/* No memory so we drop the chunk */
1880 			SCTP_STAT_INCR(sctps_nomem);
1881 			if (last_chunk == 0) {
1882 				/* we copied it, free the copy */
1883 				sctp_m_freem(dmbuf);
1884 			}
1885 			return (0);
1886 		}
1887 		chk->rec.data.TSN_seq = tsn;
1888 		chk->no_fr_allowed = 0;
1889 		chk->rec.data.stream_seq = strmseq;
1890 		chk->rec.data.stream_number = strmno;
1891 		chk->rec.data.payloadtype = protocol_id;
1892 		chk->rec.data.context = stcb->asoc.context;
1893 		chk->rec.data.doing_fast_retransmit = 0;
1894 		chk->rec.data.rcv_flags = chunk_flags;
1895 		chk->asoc = asoc;
1896 		chk->send_size = the_len;
1897 		chk->whoTo = net;
1898 		atomic_add_int(&net->ref_count, 1);
1899 		chk->data = dmbuf;
1900 	} else {
1901 		sctp_alloc_a_readq(stcb, control);
1902 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1903 		    protocol_id,
1904 		    stcb->asoc.context,
1905 		    strmno, strmseq,
1906 		    chunk_flags,
1907 		    dmbuf);
1908 		if (control == NULL) {
1909 			/* No memory so we drop the chunk */
1910 			SCTP_STAT_INCR(sctps_nomem);
1911 			if (last_chunk == 0) {
1912 				/* we copied it, free the copy */
1913 				sctp_m_freem(dmbuf);
1914 			}
1915 			return (0);
1916 		}
1917 		control->length = the_len;
1918 	}
1919 
1920 	/* Mark it as received */
1921 	/* Now queue it where it belongs */
1922 	if (control != NULL) {
1923 		/* First a sanity check */
1924 		if (asoc->fragmented_delivery_inprogress) {
1925 			/*
1926 			 * Ok, we have a fragmented delivery in progress; if
1927 			 * this chunk is next to deliver OR belongs in our
1928 			 * view to the reassembly queue, the peer is evil or
1929 			 * broken.
1930 			 */
1931 			uint32_t estimate_tsn;
1932 
1933 			estimate_tsn = asoc->tsn_last_delivered + 1;
1934 			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1935 			    (estimate_tsn == control->sinfo_tsn)) {
1936 				/* Evil/Broken peer */
1937 				sctp_m_freem(control->data);
1938 				control->data = NULL;
1939 				if (control->whoFrom) {
1940 					sctp_free_remote_addr(control->whoFrom);
1941 					control->whoFrom = NULL;
1942 				}
1943 				sctp_free_a_readq(stcb, control);
1944 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1945 				    0, M_DONTWAIT, 1, MT_DATA);
1946 				if (oper) {
1947 					struct sctp_paramhdr *ph;
1948 					uint32_t *ippp;
1949 
1950 					SCTP_BUF_LEN(oper) =
1951 					    sizeof(struct sctp_paramhdr) +
1952 					    (3 * sizeof(uint32_t));
1953 					ph = mtod(oper, struct sctp_paramhdr *);
1954 					ph->param_type =
1955 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1956 					ph->param_length = htons(SCTP_BUF_LEN(oper));
1957 					ippp = (uint32_t *) (ph + 1);
1958 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1959 					ippp++;
1960 					*ippp = tsn;
1961 					ippp++;
1962 					*ippp = ((strmno << 16) | strmseq);
1963 				}
1964 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1965 				sctp_abort_an_association(stcb->sctp_ep, stcb,
1966 				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1967 
1968 				*abort_flag = 1;
1969 				return (0);
1970 			} else {
1971 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1972 					sctp_m_freem(control->data);
1973 					control->data = NULL;
1974 					if (control->whoFrom) {
1975 						sctp_free_remote_addr(control->whoFrom);
1976 						control->whoFrom = NULL;
1977 					}
1978 					sctp_free_a_readq(stcb, control);
1979 
1980 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1981 					    0, M_DONTWAIT, 1, MT_DATA);
1982 					if (oper) {
1983 						struct sctp_paramhdr *ph;
1984 						uint32_t *ippp;
1985 
1986 						SCTP_BUF_LEN(oper) =
1987 						    sizeof(struct sctp_paramhdr) +
1988 						    (3 * sizeof(uint32_t));
1989 						ph = mtod(oper,
1990 						    struct sctp_paramhdr *);
1991 						ph->param_type =
1992 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1993 						ph->param_length =
1994 						    htons(SCTP_BUF_LEN(oper));
1995 						ippp = (uint32_t *) (ph + 1);
1996 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1997 						ippp++;
1998 						*ippp = tsn;
1999 						ippp++;
2000 						*ippp = ((strmno << 16) | strmseq);
2001 					}
2002 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2003 					sctp_abort_an_association(stcb->sctp_ep,
2004 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2005 
2006 					*abort_flag = 1;
2007 					return (0);
2008 				}
2009 			}
2010 		} else {
2011 			/* No PDAPI running */
2012 			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
2013 				/*
2014 				 * The reassembly queue is NOT empty; validate
2015 				 * that this TSN does not need to be in the
2016 				 * reassembly queue. If it does, then our peer
2017 				 * is broken or evil.
2018 				 */
2019 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2020 					sctp_m_freem(control->data);
2021 					control->data = NULL;
2022 					if (control->whoFrom) {
2023 						sctp_free_remote_addr(control->whoFrom);
2024 						control->whoFrom = NULL;
2025 					}
2026 					sctp_free_a_readq(stcb, control);
2027 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2028 					    0, M_DONTWAIT, 1, MT_DATA);
2029 					if (oper) {
2030 						struct sctp_paramhdr *ph;
2031 						uint32_t *ippp;
2032 
2033 						SCTP_BUF_LEN(oper) =
2034 						    sizeof(struct sctp_paramhdr) +
2035 						    (3 * sizeof(uint32_t));
2036 						ph = mtod(oper,
2037 						    struct sctp_paramhdr *);
2038 						ph->param_type =
2039 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2040 						ph->param_length =
2041 						    htons(SCTP_BUF_LEN(oper));
2042 						ippp = (uint32_t *) (ph + 1);
2043 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2044 						ippp++;
2045 						*ippp = tsn;
2046 						ippp++;
2047 						*ippp = ((strmno << 16) | strmseq);
2048 					}
2049 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2050 					sctp_abort_an_association(stcb->sctp_ep,
2051 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2052 
2053 					*abort_flag = 1;
2054 					return (0);
2055 				}
2056 			}
2057 		}
2058 		/* ok, if we reach here we have passed the sanity checks */
2059 		if (chunk_flags & SCTP_DATA_UNORDERED) {
2060 			/* queue directly into socket buffer */
2061 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2062 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2063 			    control,
2064 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2065 		} else {
2066 			/*
2067 			 * Special check for when streams are resetting. We
2068 			 * could be smarter about this and check the actual
2069 			 * stream to see if it is not being reset... that
2070 			 * way we would not create a HOLB when amongst
2071 			 * streams being reset and those not being reset.
2072 			 *
2073 			 * We take complete messages that have a stream reset
2074 			 * intervening (aka the TSN is after where our
2075 			 * cum-ack needs to be) off and put them on a
2076 			 * pending_reply_queue. The reassembly ones we do
2077 			 * not have to worry about since they are all sorted
2078 			 * and processed by TSN order. It is only the
2079 			 * singletons I must worry about.
2080 			 */
2081 			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2082 			    ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)))
2083 			    ) {
2084 				/*
2085 				 * yep, it's past where we need to reset... go
2086 				 * ahead and queue it.
2087 				 */
2088 				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2089 					/* first one on */
2090 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2091 				} else {
2092 					struct sctp_queued_to_read *ctlOn;
2093 					unsigned char inserted = 0;
2094 
2095 					ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
2096 					while (ctlOn) {
2097 						if (compare_with_wrap(control->sinfo_tsn,
2098 						    ctlOn->sinfo_tsn, MAX_TSN)) {
2099 							ctlOn = TAILQ_NEXT(ctlOn, next);
2100 						} else {
2101 							/* found it */
2102 							TAILQ_INSERT_BEFORE(ctlOn, control, next);
2103 							inserted = 1;
2104 							break;
2105 						}
2106 					}
2107 					if (inserted == 0) {
2108 						/*
2109 					 * not inserted in the loop
2110 					 * above, so it must go at
2111 					 * the end of the queue.
2112 						 */
2113 						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2114 					}
2115 				}
2116 			} else {
2117 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2118 				if (*abort_flag) {
2119 					return (0);
2120 				}
2121 			}
2122 		}
2123 	} else {
2124 		/* Into the re-assembly queue */
2125 		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2126 		if (*abort_flag) {
2127 			/*
2128 			 * the assoc is now gone and chk was put onto the
2129 			 * reasm queue, which has all been freed.
2130 			 */
2131 			*m = NULL;
2132 			return (0);
2133 		}
2134 	}
2135 finish_express_del:
2136 	if (tsn == (asoc->cumulative_tsn + 1)) {
2137 		/* Update cum-ack */
2138 		asoc->cumulative_tsn = tsn;
2139 	}
2140 	if (last_chunk) {
2141 		*m = NULL;
2142 	}
2143 	if (ordered) {
2144 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2145 	} else {
2146 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2147 	}
2148 	SCTP_STAT_INCR(sctps_recvdata);
2149 	/* Set it present please */
2150 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2151 		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2152 	}
2153 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2154 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2155 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2156 	}
2157 	/* check the special flag for stream resets */
2158 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2159 	    ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
2160 	    (asoc->cumulative_tsn == liste->tsn))
2161 	    ) {
2162 		/*
2163 		 * we have finished working through the backlogged TSNs; now
2164 		 * it is time to reset streams. 1: call the reset function.
2165 		 * 2: free the pending_reply space. 3: distribute any chunks
2166 		 * in the pending_reply_queue.
2167 		 */
2168 		struct sctp_queued_to_read *ctl;
2169 
2170 		sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2171 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2172 		SCTP_FREE(liste, SCTP_M_STRESET);
2173 		/* sa_ignore FREED_MEMORY */
2174 		liste = TAILQ_FIRST(&asoc->resetHead);
2175 		ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2176 		if (ctl && (liste == NULL)) {
2177 			/* All can be removed */
2178 			while (ctl) {
2179 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2180 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2181 				if (*abort_flag) {
2182 					return (0);
2183 				}
2184 				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2185 			}
2186 		} else if (ctl) {
2187 			/* more than one in queue */
2188 			while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
2189 				/*
2190 				 * if ctl->sinfo_tsn is <= liste->tsn we can
2191 				 * process it, which is the negation of
2192 				 * ctl->sinfo_tsn > liste->tsn
2193 				 */
2194 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2195 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2196 				if (*abort_flag) {
2197 					return (0);
2198 				}
2199 				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2200 			}
2201 		}
2202 		/*
2203 		 * Now service reassembly to pick up anything that has been
2204 		 * held on the reassembly queue.
2205 		 */
2206 		sctp_deliver_reasm_check(stcb, asoc);
2207 		need_reasm_check = 0;
2208 	}
2209 	if (need_reasm_check) {
2210 		/* Another one waits ? */
2211 		sctp_deliver_reasm_check(stcb, asoc);
2212 	}
2213 	return (1);
2214 }
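/*
 * Note on the return convention above: sctp_process_a_data_chunk()
 * returns 0 when the chunk was not accepted (a duplicate, out of range,
 * dropped for lack of resources, or the association aborted) and 1 when
 * the chunk was accepted and queued or delivered.
 */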
2215 
2216 int8_t sctp_map_lookup_tab[256] = {
2217 	0, 1, 0, 2, 0, 1, 0, 3,
2218 	0, 1, 0, 2, 0, 1, 0, 4,
2219 	0, 1, 0, 2, 0, 1, 0, 3,
2220 	0, 1, 0, 2, 0, 1, 0, 5,
2221 	0, 1, 0, 2, 0, 1, 0, 3,
2222 	0, 1, 0, 2, 0, 1, 0, 4,
2223 	0, 1, 0, 2, 0, 1, 0, 3,
2224 	0, 1, 0, 2, 0, 1, 0, 6,
2225 	0, 1, 0, 2, 0, 1, 0, 3,
2226 	0, 1, 0, 2, 0, 1, 0, 4,
2227 	0, 1, 0, 2, 0, 1, 0, 3,
2228 	0, 1, 0, 2, 0, 1, 0, 5,
2229 	0, 1, 0, 2, 0, 1, 0, 3,
2230 	0, 1, 0, 2, 0, 1, 0, 4,
2231 	0, 1, 0, 2, 0, 1, 0, 3,
2232 	0, 1, 0, 2, 0, 1, 0, 7,
2233 	0, 1, 0, 2, 0, 1, 0, 3,
2234 	0, 1, 0, 2, 0, 1, 0, 4,
2235 	0, 1, 0, 2, 0, 1, 0, 3,
2236 	0, 1, 0, 2, 0, 1, 0, 5,
2237 	0, 1, 0, 2, 0, 1, 0, 3,
2238 	0, 1, 0, 2, 0, 1, 0, 4,
2239 	0, 1, 0, 2, 0, 1, 0, 3,
2240 	0, 1, 0, 2, 0, 1, 0, 6,
2241 	0, 1, 0, 2, 0, 1, 0, 3,
2242 	0, 1, 0, 2, 0, 1, 0, 4,
2243 	0, 1, 0, 2, 0, 1, 0, 3,
2244 	0, 1, 0, 2, 0, 1, 0, 5,
2245 	0, 1, 0, 2, 0, 1, 0, 3,
2246 	0, 1, 0, 2, 0, 1, 0, 4,
2247 	0, 1, 0, 2, 0, 1, 0, 3,
2248 	0, 1, 0, 2, 0, 1, 0, 8
2249 };
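/*
 * sctp_map_lookup_tab[val] is the number of consecutive one bits in val
 * counting up from bit 0; the scan in sctp_slide_mapping_arrays() uses
 * it to find the first missing TSN inside a partially filled byte. A
 * hypothetical equivalent computation, kept out of the build, would be:
 */
#if 0
static int
sctp_count_trailing_ones(uint8_t val)
{
	int n = 0;

	/* count set bits starting at the least significant bit */
	while (val & 0x01) {
		n++;
		val >>= 1;
	}
	return (n);
}
#endif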
2250 
2251 
2252 void
2253 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2254 {
2255 	/*
2256 	 * Now we also need to check the mapping array in a couple of ways.
2257 	 * 1) Did we move the cum-ack point?
2258 	 *
2259 	 * When you first glance at this you might think that all entries that
2260 	 * make up the position of the cum-ack would be in the nr-mapping
2261 	 * array only... i.e. things up to the cum-ack are always
2262 	 * deliverable. That's true with one exception: when it is a fragmented
2263 	 * message we may not deliver the data until some threshold (or all
2264 	 * of it) is in place. So we must OR the nr_mapping_array and
2265 	 * mapping_array to get a true picture of the cum-ack.
2266 	 */
2267 	struct sctp_association *asoc;
2268 	int at;
2269 	uint8_t val;
2270 	int slide_from, slide_end, lgap, distance;
2271 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2272 
2273 	asoc = &stcb->asoc;
2274 	at = 0;
2275 
2276 	old_cumack = asoc->cumulative_tsn;
2277 	old_base = asoc->mapping_array_base_tsn;
2278 	old_highest = asoc->highest_tsn_inside_map;
2279 	/*
2280 	 * We could probably improve this a small bit by calculating the
2281 	 * offset of the current cum-ack as the starting point.
2282 	 */
2283 	at = 0;
2284 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2285 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2286 		if (val == 0xff) {
2287 			at += 8;
2288 		} else {
2289 			/* there is a 0 bit */
2290 			at += sctp_map_lookup_tab[val];
2291 			break;
2292 		}
2293 	}
2294 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
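	/*
	 * A worked example of the scan above: with a base TSN of 100 and
	 * the OR of the two arrays starting 0xff, 0x07, the scan yields
	 * at == 8 + 3 == 11, so the cumulative TSN becomes 100 + 10 ==
	 * 110, the last TSN of the leading run of received chunks.
	 */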
2295 
2296 	if (compare_with_wrap(asoc->cumulative_tsn, asoc->highest_tsn_inside_map, MAX_TSN) &&
2297 	    compare_with_wrap(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
2298 #ifdef INVARIANTS
2299 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2300 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2301 #else
2302 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2303 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2304 		sctp_print_mapping_array(asoc);
2305 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2306 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2307 		}
2308 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2309 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2310 #endif
2311 	}
2312 	if (compare_with_wrap(asoc->highest_tsn_inside_nr_map,
2313 	    asoc->highest_tsn_inside_map,
2314 	    MAX_TSN)) {
2315 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2316 	} else {
2317 		highest_tsn = asoc->highest_tsn_inside_map;
2318 	}
2319 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2320 		/* The complete array was completed by a single FR */
2321 		/* highest becomes the cum-ack */
2322 		int clr;
2323 
2324 #ifdef INVARIANTS
2325 		unsigned int i;
2326 
2327 #endif
2328 
2329 		/* clear the array */
2330 		clr = ((at + 7) >> 3);
2331 		if (clr > asoc->mapping_array_size) {
2332 			clr = asoc->mapping_array_size;
2333 		}
2334 		memset(asoc->mapping_array, 0, clr);
2335 		memset(asoc->nr_mapping_array, 0, clr);
2336 #ifdef INVARIANTS
2337 		for (i = 0; i < asoc->mapping_array_size; i++) {
2338 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2339 				printf("Error Mapping array's not clean at clear\n");
2340 				sctp_print_mapping_array(asoc);
2341 			}
2342 		}
2343 #endif
2344 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2345 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2346 	} else if (at >= 8) {
2347 		/* we can slide the mapping array down */
2348 		/* slide_from holds where we hit the first NON 0xff byte */
2349 
2350 		/*
2351 		 * now calculate the ceiling of the move using our highest
2352 		 * TSN value
2353 		 */
2354 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2355 		slide_end = (lgap >> 3);
2356 		if (slide_end < slide_from) {
2357 			sctp_print_mapping_array(asoc);
2358 #ifdef INVARIANTS
2359 			panic("impossible slide");
2360 #else
2361 			printf("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2362 			    lgap, slide_end, slide_from, at);
2363 			return;
2364 #endif
2365 		}
2366 		if (slide_end > asoc->mapping_array_size) {
2367 #ifdef INVARIANTS
2368 			panic("would overrun buffer");
2369 #else
2370 			printf("Gak, would have overrun map end:%d slide_end:%d\n",
2371 			    asoc->mapping_array_size, slide_end);
2372 			slide_end = asoc->mapping_array_size;
2373 #endif
2374 		}
2375 		distance = (slide_end - slide_from) + 1;
2376 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2377 			sctp_log_map(old_base, old_cumack, old_highest,
2378 			    SCTP_MAP_PREPARE_SLIDE);
2379 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2380 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2381 		}
2382 		if (distance + slide_from > asoc->mapping_array_size ||
2383 		    distance < 0) {
2384 			/*
2385 			 * Here we do NOT slide the array forward, so that
2386 			 * hopefully when more data comes in to fill it up
2387 			 * we will be able to slide it forward. Really I
2388 			 * don't think this should happen :-0
2389 			 */
2390 
2391 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2392 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2393 				    (uint32_t) asoc->mapping_array_size,
2394 				    SCTP_MAP_SLIDE_NONE);
2395 			}
2396 		} else {
2397 			int ii;
2398 
2399 			for (ii = 0; ii < distance; ii++) {
2400 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2401 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2402 
2403 			}
2404 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2405 				asoc->mapping_array[ii] = 0;
2406 				asoc->nr_mapping_array[ii] = 0;
2407 			}
2408 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2409 				asoc->highest_tsn_inside_map += (slide_from << 3);
2410 			}
2411 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2412 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2413 			}
2414 			asoc->mapping_array_base_tsn += (slide_from << 3);
2415 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2416 				sctp_log_map(asoc->mapping_array_base_tsn,
2417 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2418 				    SCTP_MAP_SLIDE_RESULT);
2419 			}
2420 		}
2421 	}
2422 }
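/*
 * The byte-copy loops in the slide case above amount to the following
 * sketch (assuming the bounds checks have passed), applied to both
 * mapping_array and nr_mapping_array and followed by advancing
 * mapping_array_base_tsn by slide_from * 8 TSNs:
 *
 *	memmove(map, map + slide_from, distance);
 *	memset(map + distance, 0, mapping_array_size - distance);
 */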
2423 
2424 
2425 void
2426 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap, int *abort_flag)
2427 {
2428 	struct sctp_association *asoc;
2429 	uint32_t highest_tsn;
2430 
2431 	asoc = &stcb->asoc;
2432 	if (compare_with_wrap(asoc->highest_tsn_inside_nr_map,
2433 	    asoc->highest_tsn_inside_map,
2434 	    MAX_TSN)) {
2435 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2436 	} else {
2437 		highest_tsn = asoc->highest_tsn_inside_map;
2438 	}
2439 
2440 	/*
2441 	 * Now we need to see if we need to queue a sack or just start the
2442 	 * timer (if allowed).
2443 	 */
2444 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2445 		/*
2446 		 * Ok, special case: in the SHUTDOWN-SENT state we make
2447 		 * sure the SACK timer is off and instead send a SHUTDOWN
2448 		 * and a SACK.
2449 		 */
2450 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2451 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2452 			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2453 		}
2454 		sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2455 		sctp_send_sack(stcb);
2456 	} else {
2457 		int is_a_gap;
2458 
2459 		/* is there a gap now ? */
2460 		is_a_gap = compare_with_wrap(highest_tsn, stcb->asoc.cumulative_tsn, MAX_TSN);
2461 
2462 		/*
2463 		 * CMT DAC algorithm: increase number of packets received
2464 		 * since last ack
2465 		 */
2466 		stcb->asoc.cmt_dac_pkts_rcvd++;
2467 
2468 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2469 							 * SACK */
2470 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2471 							 * longer is one */
2472 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2473 		    (is_a_gap) ||	/* is still a gap */
2474 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2475 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2476 		    ) {
2477 
2478 			if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off)) &&
2479 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2480 			    (stcb->asoc.send_sack == 0) &&
2481 			    (stcb->asoc.numduptsns == 0) &&
2482 			    (stcb->asoc.delayed_ack) &&
2483 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2484 
2485 				/*
2486 				 * CMT DAC algorithm: With CMT, delay acks
2487 				 * even in the face of reordering.
2488 				 *
2489 				 * Therefore, acks that do not have to be
2490 				 * sent because of the above reasons will be
2491 				 * delayed. That is, acks that would have
2492 				 * been sent due to gap reports will be
2493 				 * delayed with DAC. Start the delayed ack
2494 				 * timer.
2495 				 */
2496 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2497 				    stcb->sctp_ep, stcb, NULL);
2498 			} else {
2499 				/*
2500 				 * Ok, we must build a SACK since the timer
2501 				 * is pending, we got our first packet, OR
2502 				 * there are gaps or duplicates.
2503 				 */
2504 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2505 				sctp_send_sack(stcb);
2506 			}
2507 		} else {
2508 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2509 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2510 				    stcb->sctp_ep, stcb, NULL);
2511 			}
2512 		}
2513 	}
2514 }
2515 
2516 void
2517 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2518 {
2519 	struct sctp_tmit_chunk *chk;
2520 	uint32_t tsize, pd_point;
2521 	uint16_t nxt_todel;
2522 
2523 	if (asoc->fragmented_delivery_inprogress) {
2524 		sctp_service_reassembly(stcb, asoc);
2525 	}
2526 	/* Can we proceed further, i.e. is the PD-API complete? */
2527 	if (asoc->fragmented_delivery_inprogress) {
2528 		/* no */
2529 		return;
2530 	}
2531 	/*
2532 	 * Now is there some other chunk I can deliver from the reassembly
2533 	 * queue?
2534 	 */
2535 doit_again:
2536 	chk = TAILQ_FIRST(&asoc->reasmqueue);
2537 	if (chk == NULL) {
2538 		asoc->size_on_reasm_queue = 0;
2539 		asoc->cnt_on_reasm_queue = 0;
2540 		return;
2541 	}
2542 	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2543 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2544 	    ((nxt_todel == chk->rec.data.stream_seq) ||
2545 	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2546 		/*
2547 		 * Yep, the first one is here. We set up to start reception
2548 		 * by backing down the TSN just in case we can't deliver.
2549 		 */
2550 
2551 		/*
2552 		 * Before we start, though, either all of the message should
2553 		 * be here, or at least the partial delivery point's worth,
2554 		 * or nothing on the delivery queue and something deliverable.
2555 		 */
2556 		if (stcb->sctp_socket) {
2557 			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
2558 			    stcb->sctp_ep->partial_delivery_point);
2559 		} else {
2560 			pd_point = stcb->sctp_ep->partial_delivery_point;
2561 		}
2562 		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2563 			asoc->fragmented_delivery_inprogress = 1;
2564 			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2565 			asoc->str_of_pdapi = chk->rec.data.stream_number;
2566 			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2567 			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2568 			asoc->fragment_flags = chk->rec.data.rcv_flags;
2569 			sctp_service_reassembly(stcb, asoc);
2570 			if (asoc->fragmented_delivery_inprogress == 0) {
2571 				goto doit_again;
2572 			}
2573 		}
2574 	}
2575 }
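/*
 * Note on pd_point above: partial delivery starts only once either the
 * complete message is on the reassembly queue or at least pd_point
 * bytes of it are, where pd_point is the endpoint's
 * partial_delivery_point capped by the receive socket buffer limit
 * whenever a socket is present.
 */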
2576 
2577 int
2578 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2579     struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2580     struct sctp_nets *net, uint32_t * high_tsn)
2581 {
2582 	struct sctp_data_chunk *ch, chunk_buf;
2583 	struct sctp_association *asoc;
2584 	int num_chunks = 0;	/* number of data chunks processed */
2585 	int stop_proc = 0;
2586 	int chk_length, break_flag, last_chunk;
2587 	int abort_flag = 0, was_a_gap = 0;
2588 	struct mbuf *m;
2589 
2590 	/* set the rwnd */
2591 	sctp_set_rwnd(stcb, &stcb->asoc);
2592 
2593 	m = *mm;
2594 	SCTP_TCB_LOCK_ASSERT(stcb);
2595 	asoc = &stcb->asoc;
2596 	if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2597 	    stcb->asoc.cumulative_tsn, MAX_TSN)) {
2598 		/* there was a gap before this data was processed */
2599 		was_a_gap = 1;
2600 	}
2601 	/*
2602 	 * set up where we got the last DATA packet from for any SACK that
2603 	 * may need to go out. Don't bump the net; that is done ONLY when a
2604 	 * chunk is assigned.
2605 	 */
2606 	asoc->last_data_chunk_from = net;
2607 
2608 	/*-
2609 	 * Now before we proceed we must figure out if this is a wasted
2610 	 * cluster... i.e. it is a small packet sent in and yet the driver
2611 	 * underneath allocated a full cluster for it. If so we must copy it
2612 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2613 	 * with cluster starvation. Note for __Panda__ we don't do this
2614 	 * since it has clusters all the way down to 64 bytes.
2615 	 */
2616 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2617 		/* we only handle mbufs that are singletons.. not chains */
2618 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2619 		if (m) {
2620 			/* ok, let's see if we can copy the data up */
2621 			caddr_t *from, *to;
2622 
2623 			/* get the pointers and copy */
2624 			to = mtod(m, caddr_t *);
2625 			from = mtod((*mm), caddr_t *);
2626 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2627 			/* copy the length and free up the old */
2628 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2629 			sctp_m_freem(*mm);
2630 			/* success, copied back */
2631 			*mm = m;
2632 		} else {
2633 			/* We are in trouble in the mbuf world .. yikes */
2634 			m = *mm;
2635 		}
2636 	}
2637 	/* get pointer to the first chunk header */
2638 	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2639 	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2640 	if (ch == NULL) {
2641 		return (1);
2642 	}
2643 	/*
2644 	 * process all DATA chunks...
2645 	 */
2646 	*high_tsn = asoc->cumulative_tsn;
2647 	break_flag = 0;
2648 	asoc->data_pkts_seen++;
2649 	while (stop_proc == 0) {
2650 		/* validate chunk length */
2651 		chk_length = ntohs(ch->ch.chunk_length);
2652 		if (length - *offset < chk_length) {
2653 			/* all done, mutilated chunk */
2654 			stop_proc = 1;
2655 			break;
2656 		}
2657 		if (ch->ch.chunk_type == SCTP_DATA) {
2658 			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2659 				/*
2660 				 * Need to send an abort since we had an
2661 				 * invalid data chunk.
2662 				 */
2663 				struct mbuf *op_err;
2664 
2665 				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2666 				    0, M_DONTWAIT, 1, MT_DATA);
2667 
2668 				if (op_err) {
2669 					struct sctp_paramhdr *ph;
2670 					uint32_t *ippp;
2671 
2672 					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2673 					    (2 * sizeof(uint32_t));
2674 					ph = mtod(op_err, struct sctp_paramhdr *);
2675 					ph->param_type =
2676 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2677 					ph->param_length = htons(SCTP_BUF_LEN(op_err));
2678 					ippp = (uint32_t *) (ph + 1);
2679 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2680 					ippp++;
2681 					*ippp = asoc->cumulative_tsn;
2682 
2683 				}
2684 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2685 				sctp_abort_association(inp, stcb, m, iphlen, sh,
2686 				    op_err, 0, net->port);
2687 				return (2);
2688 			}
2689 #ifdef SCTP_AUDITING_ENABLED
2690 			sctp_audit_log(0xB1, 0);
2691 #endif
2692 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2693 				last_chunk = 1;
2694 			} else {
2695 				last_chunk = 0;
2696 			}
2697 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2698 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2699 			    last_chunk)) {
2700 				num_chunks++;
2701 			}
2702 			if (abort_flag)
2703 				return (2);
2704 
2705 			if (break_flag) {
2706 				/*
2707 				 * Set because we ran out of rwnd space and
2708 				 * no drop-report space is left.
2709 				 */
2710 				stop_proc = 1;
2711 				break;
2712 			}
2713 		} else {
2714 			/* not a data chunk in the data region */
2715 			switch (ch->ch.chunk_type) {
2716 			case SCTP_INITIATION:
2717 			case SCTP_INITIATION_ACK:
2718 			case SCTP_SELECTIVE_ACK:
2719 			case SCTP_NR_SELECTIVE_ACK:	/* EY */
2720 			case SCTP_HEARTBEAT_REQUEST:
2721 			case SCTP_HEARTBEAT_ACK:
2722 			case SCTP_ABORT_ASSOCIATION:
2723 			case SCTP_SHUTDOWN:
2724 			case SCTP_SHUTDOWN_ACK:
2725 			case SCTP_OPERATION_ERROR:
2726 			case SCTP_COOKIE_ECHO:
2727 			case SCTP_COOKIE_ACK:
2728 			case SCTP_ECN_ECHO:
2729 			case SCTP_ECN_CWR:
2730 			case SCTP_SHUTDOWN_COMPLETE:
2731 			case SCTP_AUTHENTICATION:
2732 			case SCTP_ASCONF_ACK:
2733 			case SCTP_PACKET_DROPPED:
2734 			case SCTP_STREAM_RESET:
2735 			case SCTP_FORWARD_CUM_TSN:
2736 			case SCTP_ASCONF:
2737 				/*
2738 				 * Now, what do we do with KNOWN chunks that
2739 				 * are NOT in the right place?
2740 				 *
2741 				 * For now, I do nothing but ignore them. We
2742 				 * may later want to add sysctl stuff to
2743 				 * switch out and do either an ABORT() or
2744 				 * possibly process them.
2745 				 */
2746 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2747 					struct mbuf *op_err;
2748 
2749 					op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2750 					sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
2751 					return (2);
2752 				}
2753 				break;
2754 			default:
2755 				/* unknown chunk type, use bit rules */
2756 				if (ch->ch.chunk_type & 0x40) {
2757 					/* Add an error report to the queue */
2758 					struct mbuf *merr;
2759 					struct sctp_paramhdr *phd;
2760 
2761 					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2762 					if (merr) {
2763 						phd = mtod(merr, struct sctp_paramhdr *);
2764 						/*
2765 						 * We cheat and use param
2766 						 * type since we did not
2767 						 * bother to define an error
2768 						 * cause struct. They are
2769 						 * the same basic format
2770 						 * with different names.
2771 						 */
2772 						phd->param_type =
2773 						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
2774 						phd->param_length =
2775 						    htons(chk_length + sizeof(*phd));
2776 						SCTP_BUF_LEN(merr) = sizeof(*phd);
2777 						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
2778 						    SCTP_SIZE32(chk_length),
2779 						    M_DONTWAIT);
2780 						if (SCTP_BUF_NEXT(merr)) {
2781 							sctp_queue_op_err(stcb, merr);
2782 						} else {
2783 							sctp_m_freem(merr);
2784 						}
2785 					}
2786 				}
2787 				if ((ch->ch.chunk_type & 0x80) == 0) {
2788 					/* discard the rest of this packet */
2789 					stop_proc = 1;
2790 				}	/* else skip this bad chunk and
2791 					 * continue... */
2792 				break;
2793 			}	/* switch on chunk type */
2794 		}
2795 		*offset += SCTP_SIZE32(chk_length);
2796 		if ((*offset >= length) || stop_proc) {
2797 			/* no more data left in the mbuf chain */
2798 			stop_proc = 1;
2799 			continue;
2800 		}
2801 		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2802 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2803 		if (ch == NULL) {
2804 			*offset = length;
2805 			stop_proc = 1;
2806 			break;
2807 
2808 		}
2809 	}			/* while */
2810 	if (break_flag) {
2811 		/*
2812 		 * we need to report rwnd overrun drops.
2813 		 */
2814 		sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2815 	}
2816 	if (num_chunks) {
2817 		/*
2818 		 * Did we get data? If so, update the time for auto-close and
2819 		 * give the peer credit for being alive.
2820 		 */
2821 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2822 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2823 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2824 			    stcb->asoc.overall_error_count,
2825 			    0,
2826 			    SCTP_FROM_SCTP_INDATA,
2827 			    __LINE__);
2828 		}
2829 		stcb->asoc.overall_error_count = 0;
2830 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2831 	}
2832 	/* now service all of the reassembly queue if needed */
2833 	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2834 		sctp_service_queues(stcb, asoc);
2835 
2836 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2837 		/* Assure that we ack right away */
2838 		stcb->asoc.send_sack = 1;
2839 	}
2840 	/* Start a sack timer or QUEUE a SACK for sending */
2841 	sctp_sack_check(stcb, was_a_gap, &abort_flag);
2842 	if (abort_flag)
2843 		return (2);
2844 
2845 	return (0);
2846 }
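/*
 * Return convention for sctp_process_data() above: 0 when the packet
 * was processed, 1 when no chunk header could be pulled from the mbuf
 * chain, and 2 when the association was aborted while processing.
 */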
2847 
2848 static int
2849 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2850     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2851     int *num_frs,
2852     uint32_t * biggest_newly_acked_tsn,
2853     uint32_t * this_sack_lowest_newack,
2854     int *ecn_seg_sums)
2855 {
2856 	struct sctp_tmit_chunk *tp1;
2857 	unsigned int theTSN;
2858 	int j, wake_him = 0, circled = 0;
2859 
2860 	/* Recover the tp1 we last saw */
2861 	tp1 = *p_tp1;
2862 	if (tp1 == NULL) {
2863 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2864 	}
2865 	for (j = frag_strt; j <= frag_end; j++) {
2866 		theTSN = j + last_tsn;
2867 		while (tp1) {
2868 			if (tp1->rec.data.doing_fast_retransmit)
2869 				(*num_frs) += 1;
2870 
2871 			/*-
2872 			 * CMT: CUCv2 algorithm. For each TSN being
2873 			 * processed from the sent queue, track the
2874 			 * next expected pseudo-cumack, or
2875 			 * rtx_pseudo_cumack, if required. Separate
2876 			 * cumack trackers for first transmissions,
2877 			 * and retransmissions.
2878 			 */
2879 			if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2880 			    (tp1->snd_count == 1)) {
2881 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2882 				tp1->whoTo->find_pseudo_cumack = 0;
2883 			}
2884 			if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2885 			    (tp1->snd_count > 1)) {
2886 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2887 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2888 			}
2889 			if (tp1->rec.data.TSN_seq == theTSN) {
2890 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2891 					/*-
2892 					 * must be held until
2893 					 * cum-ack passes
2894 					 */
2895 					/*-
2896 					 * ECN Nonce: Add the nonce
2897 					 * value to the sender's
2898 					 * nonce sum
2899 					 */
2900 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2901 						/*-
2902 						 * If it is less than RESEND, it is
2903 						 * now no-longer in flight.
2904 						 * Higher values may already be set
2905 						 * via previous Gap Ack Blocks...
2906 						 * i.e. ACKED or RESEND.
2907 						 */
2908 						if (compare_with_wrap(tp1->rec.data.TSN_seq,
2909 						    *biggest_newly_acked_tsn, MAX_TSN)) {
2910 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2911 						}
2912 						/*-
2913 						 * CMT: SFR algo (and HTNA) - set
2914 						 * saw_newack to 1 for dest being
2915 						 * newly acked. update
2916 						 * this_sack_highest_newack if
2917 						 * appropriate.
2918 						 */
2919 						if (tp1->rec.data.chunk_was_revoked == 0)
2920 							tp1->whoTo->saw_newack = 1;
2921 
2922 						if (compare_with_wrap(tp1->rec.data.TSN_seq,
2923 						    tp1->whoTo->this_sack_highest_newack,
2924 						    MAX_TSN)) {
2925 							tp1->whoTo->this_sack_highest_newack =
2926 							    tp1->rec.data.TSN_seq;
2927 						}
2928 						/*-
2929 						 * CMT DAC algo: also update
2930 						 * this_sack_lowest_newack
2931 						 */
2932 						if (*this_sack_lowest_newack == 0) {
2933 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2934 								sctp_log_sack(*this_sack_lowest_newack,
2935 								    last_tsn,
2936 								    tp1->rec.data.TSN_seq,
2937 								    0,
2938 								    0,
2939 								    SCTP_LOG_TSN_ACKED);
2940 							}
2941 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2942 						}
2943 						/*-
2944 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2945 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2946 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2947 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2948 						 * Separate pseudo_cumack trackers for first transmissions and
2949 						 * retransmissions.
2950 						 */
2951 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2952 							if (tp1->rec.data.chunk_was_revoked == 0) {
2953 								tp1->whoTo->new_pseudo_cumack = 1;
2954 							}
2955 							tp1->whoTo->find_pseudo_cumack = 1;
2956 						}
2957 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2958 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2959 						}
2960 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2961 							if (tp1->rec.data.chunk_was_revoked == 0) {
2962 								tp1->whoTo->new_pseudo_cumack = 1;
2963 							}
2964 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2965 						}
2966 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2967 							sctp_log_sack(*biggest_newly_acked_tsn,
2968 							    last_tsn,
2969 							    tp1->rec.data.TSN_seq,
2970 							    frag_strt,
2971 							    frag_end,
2972 							    SCTP_LOG_TSN_ACKED);
2973 						}
2974 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2975 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2976 							    tp1->whoTo->flight_size,
2977 							    tp1->book_size,
2978 							    (uintptr_t) tp1->whoTo,
2979 							    tp1->rec.data.TSN_seq);
2980 						}
2981 						sctp_flight_size_decrease(tp1);
2982 						sctp_total_flight_decrease(stcb, tp1);
2983 
2984 						tp1->whoTo->net_ack += tp1->send_size;
2985 						if (tp1->snd_count < 2) {
2986 							/*-
2987 							 * True non-retransmitted chunk
2988 							 */
2989 							tp1->whoTo->net_ack2 += tp1->send_size;
2990 
2991 							/*-
2992 							 * update RTO too ?
2993 							 */
2994 							if (tp1->do_rtt) {
2995 								tp1->whoTo->RTO =
2996 								    sctp_calculate_rto(stcb,
2997 								    &stcb->asoc,
2998 								    tp1->whoTo,
2999 								    &tp1->sent_rcv_time,
3000 								    sctp_align_safe_nocopy);
3001 								tp1->do_rtt = 0;
3002 							}
3003 						}
3004 					}
3005 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3006 						(*ecn_seg_sums) += tp1->rec.data.ect_nonce;
3007 						(*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
3008 						if (compare_with_wrap(tp1->rec.data.TSN_seq,
3009 						    stcb->asoc.this_sack_highest_gap,
3010 						    MAX_TSN)) {
3011 							stcb->asoc.this_sack_highest_gap =
3012 							    tp1->rec.data.TSN_seq;
3013 						}
3014 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3015 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3016 #ifdef SCTP_AUDITING_ENABLED
3017 							sctp_audit_log(0xB2,
3018 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3019 #endif
3020 						}
3021 					}
3022 					/*-
3023 					 * All chunks NOT UNSENT fall through here and are marked
3024 					 * (leave PR-SCTP ones that are to skip alone though)
3025 					 */
3026 					if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
3027 						tp1->sent = SCTP_DATAGRAM_MARKED;
3028 
3029 					if (tp1->rec.data.chunk_was_revoked) {
3030 						/* deflate the cwnd */
3031 						tp1->whoTo->cwnd -= tp1->book_size;
3032 						tp1->rec.data.chunk_was_revoked = 0;
3033 					}
3034 					/* NR Sack code here */
3035 					if (nr_sacking) {
3036 						if (tp1->data) {
3037 							/*
3038 							 * sa_ignore
3039 							 * NO_NULL_CHK
3040 							 */
3041 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3042 							sctp_m_freem(tp1->data);
3043 							tp1->data = NULL;
3044 						}
3045 						wake_him++;
3046 					}
3047 				}
3048 				break;
3049 			}	/* if (tp1->TSN_seq == theTSN) */
3050 			if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
3051 			    MAX_TSN))
3052 				break;
3053 
3054 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3055 			if ((tp1 == NULL) && (circled == 0)) {
3056 				circled++;
3057 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3058 			}
3059 		}		/* end while (tp1) */
3060 		if (tp1 == NULL) {
3061 			circled = 0;
3062 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3063 		}
3064 		/* In case the fragments were not in order we must reset */
3065 	}			/* end for (j = fragStart */
3066 	*p_tp1 = tp1;
3067 	return (wake_him);	/* Return value only used for nr-sack */
3068 }
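/*
 * Gap ack block offsets, as walked above, are relative to the
 * cumulative TSN ack carried in the SACK: with last_tsn == 1000, a
 * block with frag_strt == 2 and frag_end == 4 acknowledges TSNs 1002
 * through 1004.
 */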
3069 
3070 
3071 static int
3072 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3073     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3074     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3075     int num_seg, int num_nr_seg, int *ecn_seg_sums)
3076 {
3077 	struct sctp_gap_ack_block *frag, block;
3078 	struct sctp_tmit_chunk *tp1;
3079 	int i;
3080 	int num_frs = 0;
3081 	int chunk_freed;
3082 	int non_revocable;
3083 	uint16_t frag_strt, frag_end;
3084 	uint32_t last_frag_high;
3085 
3086 	tp1 = NULL;
3087 	last_frag_high = 0;
3088 	chunk_freed = 0;
3089 
3090 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3091 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3092 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3093 		*offset += sizeof(block);
3094 		if (frag == NULL) {
3095 			return (chunk_freed);
3096 		}
3097 		frag_strt = ntohs(frag->start);
3098 		frag_end = ntohs(frag->end);
3099 		/* some sanity checks on the fragment offsets */
3100 		if (frag_strt > frag_end) {
3101 			/* this one is malformed, skip */
3102 			continue;
3103 		}
3104 		if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
3105 		    MAX_TSN))
3106 			*biggest_tsn_acked = frag_end + last_tsn;
3107 
3108 		/* mark acked dgs and find out the highest TSN being acked */
3109 		if (tp1 == NULL) {
3110 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3111 			/* save the locations of the last frags */
3112 			last_frag_high = frag_end + last_tsn;
3113 		} else {
3114 			/*
3115 			 * now let's see if we need to reset the queue due to
3116 			 * an out-of-order SACK fragment
3117 			 */
3118 			if (compare_with_wrap(frag_strt + last_tsn,
3119 			    last_frag_high, MAX_TSN)) {
3120 				/*
3121 				 * if the new frag starts after the last TSN
3122 				 * frag covered, we are ok and this one is
3123 				 * beyond the last one
3124 				 */
3125 				;
3126 			} else {
3127 				/*
3128 				 * ok, they have reset us, so we need to
3129 				 * reset the queue; this will cause extra
3130 				 * hunting, but hey, they chose the
3131 				 * performance hit when they failed to order
3132 				 * their gaps
3133 				 */
3134 				tp1 = TAILQ_FIRST(&asoc->sent_queue);
3135 			}
3136 			last_frag_high = frag_end + last_tsn;
3137 		}
3138 		if (i < num_seg) {
3139 			non_revocable = 0;
3140 		} else {
3141 			non_revocable = 1;
3142 		}
3143 		if (i == num_seg) {
3144 			tp1 = NULL;
3145 		}
3146 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3147 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3148 		    this_sack_lowest_newack, ecn_seg_sums)) {
3149 			chunk_freed = 1;
3150 		}
3151 	}
3152 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3153 		if (num_frs)
3154 			sctp_log_fr(*biggest_tsn_acked,
3155 			    *biggest_newly_acked_tsn,
3156 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3157 	}
3158 	return (chunk_freed);
3159 }
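
/*
 * Worked example, illustrative only and #if 0'd: gap-ack block offsets
 * in a SACK are relative to the cum-ack it carries.  With last_tsn =
 * 1000 and a block of {start = 3, end = 5}, the peer is acking TSNs
 * 1003 through 1005.  gap_block_to_tsns() is a hypothetical helper,
 * not part of this file's API.
 */
#if 0
static void
gap_block_to_tsns(uint32_t last_tsn, uint16_t frag_strt, uint16_t frag_end,
    uint32_t *first_tsn, uint32_t *last_tsn_acked)
{
	*first_tsn = last_tsn + frag_strt;	/* lowest TSN the block covers */
	*last_tsn_acked = last_tsn + frag_end;	/* highest TSN the block covers */
}
#endif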
3160 
3161 static void
3162 sctp_check_for_revoked(struct sctp_tcb *stcb,
3163     struct sctp_association *asoc, uint32_t cumack,
3164     uint32_t biggest_tsn_acked)
3165 {
3166 	struct sctp_tmit_chunk *tp1;
3167 	int tot_revoked = 0;
3168 
3169 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3170 	while (tp1) {
3171 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
3172 		    MAX_TSN)) {
3173 			/*
3174 			 * ok this guy is either ACKED or MARKED. If it is
3175 			 * ACKED it has been previously acked but not this
3176 			 * time, i.e. revoked.  If it is MARKED it was ACK'ed
3177 			 * again.
3178 			 */
3179 			if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3180 			    MAX_TSN))
3181 				break;
3182 
3183 
3184 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3185 				/* it has been revoked */
3186 				tp1->sent = SCTP_DATAGRAM_SENT;
3187 				tp1->rec.data.chunk_was_revoked = 1;
3188 				/*
3189 				 * We must add this stuff back in to assure
3190 				 * timers and such get started.
3191 				 */
3192 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3193 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3194 					    tp1->whoTo->flight_size,
3195 					    tp1->book_size,
3196 					    (uintptr_t) tp1->whoTo,
3197 					    tp1->rec.data.TSN_seq);
3198 				}
3199 				sctp_flight_size_increase(tp1);
3200 				sctp_total_flight_increase(stcb, tp1);
3201 				/*
3202 				 * We inflate the cwnd to compensate for our
3203 				 * artificial inflation of the flight_size.
3204 				 */
3205 				tp1->whoTo->cwnd += tp1->book_size;
3206 				tot_revoked++;
3207 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3208 					sctp_log_sack(asoc->last_acked_seq,
3209 					    cumack,
3210 					    tp1->rec.data.TSN_seq,
3211 					    0,
3212 					    0,
3213 					    SCTP_LOG_TSN_REVOKED);
3214 				}
3215 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3216 				/* it has been re-acked in this SACK */
3217 				tp1->sent = SCTP_DATAGRAM_ACKED;
3218 			}
3219 		}
3220 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3221 			break;
3222 		tp1 = TAILQ_NEXT(tp1, sctp_next);
3223 	}
3224 	if (tot_revoked > 0) {
3225 		/*
3226 		 * Setup the ecn nonce re-sync point. We do this since once
3227 		 * data is revoked we begin to retransmit things, which do
3228 		 * NOT have the ECN bits set. This means we are now out of
3229 		 * sync and must wait until we get back in sync with the
3230 		 * peer to check ECN bits.
3231 		 */
3232 		tp1 = TAILQ_FIRST(&asoc->send_queue);
3233 		if (tp1 == NULL) {
3234 			asoc->nonce_resync_tsn = asoc->sending_seq;
3235 		} else {
3236 			asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
3237 		}
3238 		asoc->nonce_wait_for_ecne = 0;
3239 		asoc->nonce_sum_check = 0;
3240 	}
3241 }
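
/*
 * Minimal sketch of the state walk performed above, illustrative only
 * and #if 0'd: a chunk still at ACKED was gap-acked by an earlier SACK
 * but not by this one, so it is revoked back to SENT and re-enters the
 * flight; a chunk at MARKED was acked again by this SACK and settles
 * at ACKED.  The constants are the real sent states; the helper itself
 * is hypothetical.
 */
#if 0
static int
revoke_transition(int sent_state)
{
	switch (sent_state) {
	case SCTP_DATAGRAM_ACKED:	/* previously gap-acked, missing now */
		return (SCTP_DATAGRAM_SENT);	/* revoked, back in flight */
	case SCTP_DATAGRAM_MARKED:	/* re-acked by this SACK */
		return (SCTP_DATAGRAM_ACKED);
	default:
		return (sent_state);	/* everything else is untouched */
	}
}
#endif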
3242 
3243 
3244 static void
3245 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3246     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3247 {
3248 	struct sctp_tmit_chunk *tp1;
3249 	int strike_flag = 0;
3250 	struct timeval now;
3251 	int tot_retrans = 0;
3252 	uint32_t sending_seq;
3253 	struct sctp_nets *net;
3254 	int num_dests_sacked = 0;
3255 
3256 	/*
3257 	 * select the sending_seq: this is either the next thing ready to be
3258 	 * sent but not transmitted, or the next seq we will assign.
3259 	 */
3260 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3261 	if (tp1 == NULL) {
3262 		sending_seq = asoc->sending_seq;
3263 	} else {
3264 		sending_seq = tp1->rec.data.TSN_seq;
3265 	}
3266 
3267 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3268 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3269 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3270 			if (net->saw_newack)
3271 				num_dests_sacked++;
3272 		}
3273 	}
3274 	if (stcb->asoc.peer_supports_prsctp) {
3275 		(void)SCTP_GETTIME_TIMEVAL(&now);
3276 	}
3277 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3278 	while (tp1) {
3279 		strike_flag = 0;
3280 		if (tp1->no_fr_allowed) {
3281 			/* this one had a timeout or something */
3282 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3283 			continue;
3284 		}
3285 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3286 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3287 				sctp_log_fr(biggest_tsn_newly_acked,
3288 				    tp1->rec.data.TSN_seq,
3289 				    tp1->sent,
3290 				    SCTP_FR_LOG_CHECK_STRIKE);
3291 		}
3292 		if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3293 		    MAX_TSN) ||
3294 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3295 			/* done */
3296 			break;
3297 		}
3298 		if (stcb->asoc.peer_supports_prsctp) {
3299 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3300 				/* Is it expired? */
3301 				/*
3302 				 * TODO: sctp_constants.h needs alternative
3303 				 * time macros when _KERNEL is undefined.
3304 				 */
3305 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3308 					/* Yes so drop it */
3309 					if (tp1->data != NULL) {
3310 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3311 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3312 						    SCTP_SO_NOT_LOCKED);
3313 					}
3314 					tp1 = TAILQ_NEXT(tp1, sctp_next);
3315 					continue;
3316 				}
3317 			}
3318 		}
3319 		if (compare_with_wrap(tp1->rec.data.TSN_seq,
3320 		    asoc->this_sack_highest_gap, MAX_TSN)) {
3321 			/* we are beyond the tsn in the sack  */
3322 			break;
3323 		}
3324 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3325 			/* either a RESEND, ACKED, or MARKED */
3326 			/* skip */
3327 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3328 				/* Continue striking FWD-TSN chunks */
3329 				tp1->rec.data.fwd_tsn_cnt++;
3330 			}
3331 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3332 			continue;
3333 		}
3334 		/*
3335 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3336 		 */
3337 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3338 			/*
3339 			 * No new acks were received for data sent to this
3340 			 * dest. Therefore, according to the SFR algo for
3341 			 * CMT, no data sent to this dest can be marked for
3342 			 * FR using this SACK.
3343 			 */
3344 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3345 			continue;
3346 		} else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq,
3347 		    tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
3348 			/*
3349 			 * CMT: New acks were received for data sent to
3350 			 * this dest. But no new acks were seen for data
3351 			 * sent after tp1. Therefore, according to the SFR
3352 			 * algo for CMT, tp1 cannot be marked for FR using
3353 			 * this SACK. This step covers part of the DAC algo
3354 			 * and the HTNA algo as well.
3355 			 */
3356 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3357 			continue;
3358 		}
3359 		/*
3360 		 * Here we check to see if we have already done a FR
3361 		 * and if so we see if the biggest TSN we saw in the sack is
3362 		 * smaller than the recovery point. If so we don't strike
3363 		 * the tsn... otherwise we CAN strike the TSN.
3364 		 */
3365 		/*
3366 		 * @@@ JRI: Check for CMT if (accum_moved &&
3367 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3368 		 * 0)) {
3369 		 */
3370 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3371 			/*
3372 			 * Strike the TSN if in fast-recovery and cum-ack
3373 			 * moved.
3374 			 */
3375 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3376 				sctp_log_fr(biggest_tsn_newly_acked,
3377 				    tp1->rec.data.TSN_seq,
3378 				    tp1->sent,
3379 				    SCTP_FR_LOG_STRIKE_CHUNK);
3380 			}
3381 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3382 				tp1->sent++;
3383 			}
3384 			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3385 				/*
3386 				 * CMT DAC algorithm: If SACK flag is set to
3387 				 * 0, then lowest_newack test will not pass
3388 				 * because it would have been set to the
3389 				 * cumack earlier. If it is not already marked
3390 				 * for rtx, the SACK is not a mixed SACK, and
3391 				 * tp1 is not between two sacked TSNs, then
3392 				 * mark it once more. NOTE that we are marking by one
3393 				 * additional time since the SACK DAC flag
3394 				 * indicates that two packets have been
3395 				 * received after this missing TSN.
3396 				 */
3397 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3398 				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3399 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3400 						sctp_log_fr(16 + num_dests_sacked,
3401 						    tp1->rec.data.TSN_seq,
3402 						    tp1->sent,
3403 						    SCTP_FR_LOG_STRIKE_CHUNK);
3404 					}
3405 					tp1->sent++;
3406 				}
3407 			}
3408 		} else if ((tp1->rec.data.doing_fast_retransmit) && (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
3409 			/*
3410 			 * For those that have done a FR we must take
3411 			 * special consideration if we strike. I.e the
3412 			 * biggest_newly_acked must be higher than the
3413 			 * sending_seq at the time we did the FR.
3414 			 */
3415 			if (
3416 #ifdef SCTP_FR_TO_ALTERNATE
3417 			/*
3418 			 * If FR's go to new networks, then we must only do
3419 			 * this for singly homed asoc's. However if the FR's
3420 			 * go to the same network (Armando's work) then it's
3421 			 * ok to FR multiple times.
3422 			 */
3423 			    (asoc->numnets < 2)
3424 #else
3425 			    (1)
3426 #endif
3427 			    ) {
3428 
3429 				if ((compare_with_wrap(biggest_tsn_newly_acked,
3430 				    tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3431 				    (biggest_tsn_newly_acked ==
3432 				    tp1->rec.data.fast_retran_tsn)) {
3433 					/*
3434 					 * Strike the TSN, since this ack is
3435 					 * beyond where things were when we
3436 					 * did a FR.
3437 					 */
3438 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3439 						sctp_log_fr(biggest_tsn_newly_acked,
3440 						    tp1->rec.data.TSN_seq,
3441 						    tp1->sent,
3442 						    SCTP_FR_LOG_STRIKE_CHUNK);
3443 					}
3444 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3445 						tp1->sent++;
3446 					}
3447 					strike_flag = 1;
3448 					if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3449 						/*
3450 						 * CMT DAC algorithm: If
3451 						 * SACK flag is set to 0,
3452 						 * then lowest_newack test
3453 						 * will not pass because it
3454 						 * would have been set to
3455 						 * the cumack earlier. If
3456 						 * it is not already marked
3457 						 * for rtx, the SACK is not
3458 						 * a mixed SACK, and tp1 is
3459 						 * not between two sacked
3460 						 * TSNs, mark once more. NOTE that we
3461 						 * are marking by one
3462 						 * additional time since the
3463 						 * SACK DAC flag indicates
3464 						 * that two packets have
3465 						 * been received after this
3466 						 * missing TSN.
3467 						 */
3468 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3469 						    (num_dests_sacked == 1) &&
3470 						    compare_with_wrap(this_sack_lowest_newack,
3471 						    tp1->rec.data.TSN_seq, MAX_TSN)) {
3472 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3473 								sctp_log_fr(32 + num_dests_sacked,
3474 								    tp1->rec.data.TSN_seq,
3475 								    tp1->sent,
3476 								    SCTP_FR_LOG_STRIKE_CHUNK);
3477 							}
3478 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3479 								tp1->sent++;
3480 							}
3481 						}
3482 					}
3483 				}
3484 			}
3485 			/*
3486 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3487 			 * algo covers HTNA.
3488 			 */
3489 		} else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3490 		    biggest_tsn_newly_acked, MAX_TSN)) {
3491 			/*
3492 			 * We don't strike these: this is the HTNA
3493 			 * algorithm, i.e. we don't strike if our TSN is
3494 			 * larger than the Highest TSN Newly Acked.
3495 			 */
3496 			;
3497 		} else {
3498 			/* Strike the TSN */
3499 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3500 				sctp_log_fr(biggest_tsn_newly_acked,
3501 				    tp1->rec.data.TSN_seq,
3502 				    tp1->sent,
3503 				    SCTP_FR_LOG_STRIKE_CHUNK);
3504 			}
3505 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3506 				tp1->sent++;
3507 			}
3508 			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3509 				/*
3510 				 * CMT DAC algorithm: If SACK flag is set to
3511 				 * 0, then lowest_newack test will not pass
3512 				 * because it would have been set to the
3513 				 * cumack earlier. If it is not already marked
3514 				 * for rtx, the SACK is not a mixed SACK, and
3515 				 * tp1 is not between two sacked TSNs, then
3516 				 * mark it once more. NOTE that we are marking by one
3517 				 * additional time since the SACK DAC flag
3518 				 * indicates that two packets have been
3519 				 * received after this missing TSN.
3520 				 */
3521 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3522 				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3523 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3524 						sctp_log_fr(48 + num_dests_sacked,
3525 						    tp1->rec.data.TSN_seq,
3526 						    tp1->sent,
3527 						    SCTP_FR_LOG_STRIKE_CHUNK);
3528 					}
3529 					tp1->sent++;
3530 				}
3531 			}
3532 		}
3533 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3534 			struct sctp_nets *alt;
3535 
3536 			/* fix counts and things */
3537 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3538 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3539 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3540 				    tp1->book_size,
3541 				    (uintptr_t) tp1->whoTo,
3542 				    tp1->rec.data.TSN_seq);
3543 			}
3544 			if (tp1->whoTo) {
3545 				tp1->whoTo->net_ack++;
3546 				sctp_flight_size_decrease(tp1);
3547 			}
3548 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3549 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3550 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3551 			}
3552 			/* add back to the rwnd */
3553 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3554 
3555 			/* remove from the total flight */
3556 			sctp_total_flight_decrease(stcb, tp1);
3557 
3558 			if ((stcb->asoc.peer_supports_prsctp) &&
3559 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3560 				/*
3561 				 * Has it been retransmitted tv_sec times? -
3562 				 * we store the retran count there.
3563 				 */
3564 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3565 					/* Yes, so drop it */
3566 					if (tp1->data != NULL) {
3567 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3568 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3569 						    SCTP_SO_NOT_LOCKED);
3570 					}
3571 					/* Make sure to flag we had a FR */
3572 					tp1->whoTo->net_ack++;
3573 					tp1 = TAILQ_NEXT(tp1, sctp_next);
3574 					continue;
3575 				}
3576 			}
3577 			/* printf("OK, we are now ready to FR this guy\n"); */
3578 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3579 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3580 				    0, SCTP_FR_MARKED);
3581 			}
3582 			if (strike_flag) {
3583 				/* This is a subsequent FR */
3584 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3585 			}
3586 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3587 			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
3588 				/*
3589 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3590 				 * If CMT is being used, then pick dest with
3591 				 * largest ssthresh for any retransmission.
3592 				 */
3593 				tp1->no_fr_allowed = 1;
3594 				alt = tp1->whoTo;
3595 				/* sa_ignore NO_NULL_CHK */
3596 				if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
3597 					/*
3598 					 * JRS 5/18/07 - If CMT PF is on,
3599 					 * use the PF version of
3600 					 * find_alt_net()
3601 					 */
3602 					alt = sctp_find_alternate_net(stcb, alt, 2);
3603 				} else {
3604 					/*
3605 					 * JRS 5/18/07 - If only CMT is on,
3606 					 * use the CMT version of
3607 					 * find_alt_net()
3608 					 */
3609 					/* sa_ignore NO_NULL_CHK */
3610 					alt = sctp_find_alternate_net(stcb, alt, 1);
3611 				}
3612 				if (alt == NULL) {
3613 					alt = tp1->whoTo;
3614 				}
3615 				/*
3616 				 * CUCv2: If a different dest is picked for
3617 				 * the retransmission, then new
3618 				 * (rtx-)pseudo_cumack needs to be tracked
3619 				 * for orig dest. Let CUCv2 track new (rtx-)
3620 				 * pseudo-cumack always.
3621 				 */
3622 				if (tp1->whoTo) {
3623 					tp1->whoTo->find_pseudo_cumack = 1;
3624 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3625 				}
3626 			} else {/* CMT is OFF */
3627 
3628 #ifdef SCTP_FR_TO_ALTERNATE
3629 				/* Can we find an alternate? */
3630 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3631 #else
3632 				/*
3633 				 * default behavior is to NOT retransmit
3634 				 * FR's to an alternate. Armando Caro's
3635 				 * paper details why.
3636 				 */
3637 				alt = tp1->whoTo;
3638 #endif
3639 			}
3640 
3641 			tp1->rec.data.doing_fast_retransmit = 1;
3642 			tot_retrans++;
3643 			/* mark the sending seq for possible subsequent FR's */
3644 			/*
3645 			 * printf("Marking TSN for FR new value %x\n",
3646 			 * (uint32_t)tp1->rec.data.TSN_seq);
3647 			 */
3648 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3649 				/*
3650 				 * If the send queue is empty then it's
3651 				 * the next sequence number that will be
3652 				 * assigned, so we subtract one from this to
3653 				 * get the one we last sent.
3654 				 */
3655 				tp1->rec.data.fast_retran_tsn = sending_seq;
3656 			} else {
3657 				/*
3658 				 * If there are chunks on the send queue
3659 				 * (unsent data that has made it from the
3660 				 * stream queues but not out the door), we
3661 				 * take the first one (which will have the
3662 				 * lowest TSN) and subtract one to get the
3663 				 * one we last sent.
3664 				 */
3665 				struct sctp_tmit_chunk *ttt;
3666 
3667 				ttt = TAILQ_FIRST(&asoc->send_queue);
3668 				tp1->rec.data.fast_retran_tsn =
3669 				    ttt->rec.data.TSN_seq;
3670 			}
3671 
3672 			if (tp1->do_rtt) {
3673 				/*
3674 				 * this guy had an RTO calculation pending on
3675 				 * it, cancel it
3676 				 */
3677 				tp1->do_rtt = 0;
3678 			}
3679 			if (alt != tp1->whoTo) {
3680 				/* yes, there is an alternate. */
3681 				sctp_free_remote_addr(tp1->whoTo);
3682 				/* sa_ignore FREED_MEMORY */
3683 				tp1->whoTo = alt;
3684 				atomic_add_int(&alt->ref_count, 1);
3685 			}
3686 		}
3687 		tp1 = TAILQ_NEXT(tp1, sctp_next);
3688 	}			/* while (tp1) */
3689 
3690 	if (tot_retrans > 0) {
3691 		/*
3692 		 * Setup the ecn nonce re-sync point. We do this since once
3693 		 * we FR something we introduce a Karn's rule scenario
3694 		 * and won't know the totals for the ECN bits.
3695 		 */
3696 		asoc->nonce_resync_tsn = sending_seq;
3697 		asoc->nonce_wait_for_ecne = 0;
3698 		asoc->nonce_sum_check = 0;
3699 	}
3700 }
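
/*
 * Minimal sketch of the marking scheme used above, illustrative only
 * and #if 0'd: the miss count rides in tp1->sent, each qualifying SACK
 * increments it, and reaching SCTP_DATAGRAM_RESEND is what queues the
 * chunk for fast retransmit (the DAC case may add one extra increment
 * per SACK).  strike_once() is a hypothetical helper.
 */
#if 0
static int
strike_once(int sent_state)
{
	if (sent_state < SCTP_DATAGRAM_RESEND)
		sent_state++;	/* one more SACK reported this TSN missing */
	return (sent_state);	/* == SCTP_DATAGRAM_RESEND means do the FR */
}
#endif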
3701 
3702 struct sctp_tmit_chunk *
3703 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3704     struct sctp_association *asoc)
3705 {
3706 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3707 	struct timeval now;
3708 	int now_filled = 0;
3709 
3710 	if (asoc->peer_supports_prsctp == 0) {
3711 		return (NULL);
3712 	}
3713 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3714 	while (tp1) {
3715 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3716 		    tp1->sent != SCTP_DATAGRAM_RESEND) {
3717 			/* no chance to advance, out of here */
3718 			break;
3719 		}
3720 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3721 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3722 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3723 				    asoc->advanced_peer_ack_point,
3724 				    tp1->rec.data.TSN_seq, 0, 0);
3725 			}
3726 		}
3727 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3728 			/*
3729 			 * We can't fwd-tsn past any that are reliable aka
3730 			 * retransmitted until the asoc fails.
3731 			 */
3732 			break;
3733 		}
3734 		if (!now_filled) {
3735 			(void)SCTP_GETTIME_TIMEVAL(&now);
3736 			now_filled = 1;
3737 		}
3738 		tp2 = TAILQ_NEXT(tp1, sctp_next);
3739 		/*
3740 		 * now we have a chunk which is marked for another
3741 		 * retransmission to a PR-stream but may have run out of its
3742 		 * chances already, OR has been marked to skip now. Can we
3743 		 * skip it if it's a resend?
3744 		 */
3745 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3746 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3747 			/*
3748 			 * Now is this one marked for resend and its time is
3749 			 * now up?
3750 			 */
3751 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3752 				/* Yes so drop it */
3753 				if (tp1->data) {
3754 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3755 					    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3756 					    SCTP_SO_NOT_LOCKED);
3757 				}
3758 			} else {
3759 				/*
3760 				 * No, we are done when we hit one marked for
3761 				 * resend whose time has not expired.
3762 				 */
3763 				break;
3764 			}
3765 		}
3766 		/*
3767 		 * Ok, now if this chunk is marked to drop, we can clean up
3768 		 * the chunk, advance our peer ack point and we can check
3769 		 * the next chunk.
3770 		 */
3771 		if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3772 			/* advance PeerAckPoint goes forward */
3773 			if (compare_with_wrap(tp1->rec.data.TSN_seq,
3774 			    asoc->advanced_peer_ack_point,
3775 			    MAX_TSN)) {
3776 
3777 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3778 				a_adv = tp1;
3779 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3780 				/* No update but we do save the chk */
3781 				a_adv = tp1;
3782 			}
3783 		} else {
3784 			/*
3785 			 * If it is still in RESEND we can advance no
3786 			 * further
3787 			 */
3788 			break;
3789 		}
3790 		/*
3791 		 * If we hit here we just dumped tp1, move to next tsn on
3792 		 * sent queue.
3793 		 */
3794 		tp1 = tp2;
3795 	}
3796 	return (a_adv);
3797 }
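
/*
 * Illustrative outcome of the walk above (comment only): given a sent
 * queue of TSNs 10 (SKIP), 11 (SKIP), 12 (RESEND, PR-TTL expired), the
 * routine abandons 12, moves advanced_peer_ack_point up through 12,
 * and returns the last chunk that moved the point so the caller can
 * decide whether a FWD-TSN is due.  This assumes the release call
 * marks the expired chunk skippable, as it is used elsewhere in this
 * file.
 */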
3798 
3799 static int
3800 sctp_fs_audit(struct sctp_association *asoc)
3801 {
3802 	struct sctp_tmit_chunk *chk;
3803 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3804 	int entry_flight, entry_cnt, ret;
3805 
3806 	entry_flight = asoc->total_flight;
3807 	entry_cnt = asoc->total_flight_count;
3808 	ret = 0;
3809 
3810 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3811 		return (0);
3812 
3813 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3814 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3815 			printf("Chk TSN:%u size:%d inflight cnt:%d\n",
3816 			    chk->rec.data.TSN_seq,
3817 			    chk->send_size,
3818 			    chk->snd_count
3819 			    );
3820 			inflight++;
3821 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3822 			resend++;
3823 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3824 			inbetween++;
3825 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3826 			above++;
3827 		} else {
3828 			acked++;
3829 		}
3830 	}
3831 
3832 	if ((inflight > 0) || (inbetween > 0)) {
3833 #ifdef INVARIANTS
3834 		panic("Flight size-express incorrect?\n");
3835 #else
3836 		printf("asoc->total_flight:%d cnt:%d\n",
3837 		    entry_flight, entry_cnt);
3838 
3839 		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3840 		    inflight, inbetween, resend, above, acked);
3841 		ret = 1;
3842 #endif
3843 	}
3844 	return (ret);
3845 }
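
/*
 * Illustrative restatement of the invariant audited above, #if 0'd:
 * every chunk still below RESEND should be accounted for in the
 * association's flight, so the expected total can be recomputed from
 * the sent queue.  A minimal sketch, assuming book_size is the flight
 * currency and ignoring window-probe special cases.
 */
#if 0
static uint32_t
expected_total_flight(struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint32_t fs = 0;

	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->sent < SCTP_DATAGRAM_RESEND)
			fs += chk->book_size;	/* still counts as in flight */
	}
	return (fs);
}
#endif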
3846 
3847 
3848 static void
3849 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3850     struct sctp_association *asoc,
3851     struct sctp_nets *net,
3852     struct sctp_tmit_chunk *tp1)
3853 {
3854 	tp1->window_probe = 0;
3855 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3856 		/* TSN was skipped; we do NOT move it back. */
3857 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3858 		    tp1->whoTo->flight_size,
3859 		    tp1->book_size,
3860 		    (uintptr_t) tp1->whoTo,
3861 		    tp1->rec.data.TSN_seq);
3862 		return;
3863 	}
3864 	/* First setup this by shrinking flight */
3865 	sctp_flight_size_decrease(tp1);
3866 	sctp_total_flight_decrease(stcb, tp1);
3867 	/* Now mark for resend */
3868 	tp1->sent = SCTP_DATAGRAM_RESEND;
3869 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3870 
3871 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3872 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3873 		    tp1->whoTo->flight_size,
3874 		    tp1->book_size,
3875 		    (uintptr_t) tp1->whoTo,
3876 		    tp1->rec.data.TSN_seq);
3877 	}
3878 }
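
/*
 * Context note (comment only): a window probe is a single chunk sent
 * into a zero rwnd.  Once the peer reopens its window, the probe chunk
 * is pulled back out of flight above and marked for resend so normal
 * transmission can resume.
 */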
3879 
3880 void
3881 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3882     uint32_t rwnd, int nonce_sum_flag, int *abort_now)
3883 {
3884 	struct sctp_nets *net;
3885 	struct sctp_association *asoc;
3886 	struct sctp_tmit_chunk *tp1, *tp2;
3887 	uint32_t old_rwnd;
3888 	int win_probe_recovery = 0;
3889 	int win_probe_recovered = 0;
3890 	int j, done_once = 0;
3891 
3892 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3893 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3894 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3895 	}
3896 	SCTP_TCB_LOCK_ASSERT(stcb);
3897 #ifdef SCTP_ASOCLOG_OF_TSNS
3898 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3899 	stcb->asoc.cumack_log_at++;
3900 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3901 		stcb->asoc.cumack_log_at = 0;
3902 	}
3903 #endif
3904 	asoc = &stcb->asoc;
3905 	old_rwnd = asoc->peers_rwnd;
3906 	if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
3907 		/* old ack */
3908 		return;
3909 	} else if (asoc->last_acked_seq == cumack) {
3910 		/* Window update sack */
3911 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3912 		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3913 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3914 			/* SWS sender side engages */
3915 			asoc->peers_rwnd = 0;
3916 		}
3917 		if (asoc->peers_rwnd > old_rwnd) {
3918 			goto again;
3919 		}
3920 		return;
3921 	}
3922 	/* First setup for CC stuff */
3923 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3924 		net->prev_cwnd = net->cwnd;
3925 		net->net_ack = 0;
3926 		net->net_ack2 = 0;
3927 
3928 		/*
3929 		 * CMT: Reset CUC and Fast recovery algo variables before
3930 		 * SACK processing
3931 		 */
3932 		net->new_pseudo_cumack = 0;
3933 		net->will_exit_fast_recovery = 0;
3934 	}
3935 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3936 		uint32_t send_s;
3937 
3938 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3939 			tp1 = TAILQ_LAST(&asoc->sent_queue,
3940 			    sctpchunk_listhead);
3941 			send_s = tp1->rec.data.TSN_seq + 1;
3942 		} else {
3943 			send_s = asoc->sending_seq;
3944 		}
3945 		if ((cumack == send_s) ||
3946 		    compare_with_wrap(cumack, send_s, MAX_TSN)) {
3947 #ifndef INVARIANTS
3948 			struct mbuf *oper;
3949 
3950 #endif
3951 #ifdef INVARIANTS
3952 			panic("Impossible sack 1");
3953 #else
3954 
3955 			*abort_now = 1;
3956 			/* XXX */
3957 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3958 			    0, M_DONTWAIT, 1, MT_DATA);
3959 			if (oper) {
3960 				struct sctp_paramhdr *ph;
3961 				uint32_t *ippp;
3962 
3963 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3964 				    sizeof(uint32_t);
3965 				ph = mtod(oper, struct sctp_paramhdr *);
3966 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3967 				ph->param_length = htons(SCTP_BUF_LEN(oper));
3968 				ippp = (uint32_t *) (ph + 1);
3969 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3970 			}
3971 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3972 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
3973 			return;
3974 #endif
3975 		}
3976 	}
3977 	asoc->this_sack_highest_gap = cumack;
3978 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3979 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3980 		    stcb->asoc.overall_error_count,
3981 		    0,
3982 		    SCTP_FROM_SCTP_INDATA,
3983 		    __LINE__);
3984 	}
3985 	stcb->asoc.overall_error_count = 0;
3986 	if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
3987 		/* process the new consecutive TSN first */
3988 		tp1 = TAILQ_FIRST(&asoc->sent_queue);
3989 		while (tp1) {
3990 			tp2 = TAILQ_NEXT(tp1, sctp_next);
3991 			if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
3992 			    MAX_TSN) ||
3993 			    cumack == tp1->rec.data.TSN_seq) {
3994 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3995 					printf("Warning, an unsent is now acked?\n");
3996 				}
3997 				/*
3998 				 * ECN Nonce: Add the nonce to the sender's
3999 				 * nonce sum
4000 				 */
4001 				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4002 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4003 					/*
4004 					 * If it is less than ACKED, it is
4005 					 * now no longer in flight. Higher
4006 					 * values may occur during marking
4007 					 */
4008 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4009 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4010 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4011 							    tp1->whoTo->flight_size,
4012 							    tp1->book_size,
4013 							    (uintptr_t) tp1->whoTo,
4014 							    tp1->rec.data.TSN_seq);
4015 						}
4016 						sctp_flight_size_decrease(tp1);
4017 						/* sa_ignore NO_NULL_CHK */
4018 						sctp_total_flight_decrease(stcb, tp1);
4019 					}
4020 					tp1->whoTo->net_ack += tp1->send_size;
4021 					if (tp1->snd_count < 2) {
4022 						/*
4023 						 * True non-retransmitted
4024 						 * chunk
4025 						 */
4026 						tp1->whoTo->net_ack2 +=
4027 						    tp1->send_size;
4028 
4029 						/* update RTO too? */
4030 						if (tp1->do_rtt) {
4031 							tp1->whoTo->RTO =
4032 							/*
4033 							 * sa_ignore
4034 							 * NO_NULL_CHK
4035 							 */
4036 							    sctp_calculate_rto(stcb,
4037 							    asoc, tp1->whoTo,
4038 							    &tp1->sent_rcv_time,
4039 							    sctp_align_safe_nocopy);
4040 							tp1->do_rtt = 0;
4041 						}
4042 					}
4043 					/*
4044 					 * CMT: CUCv2 algorithm. From the
4045 					 * cumack'd TSNs, for each TSN being
4046 					 * acked for the first time, set the
4047 					 * following variables for the
4048 					 * corresp destination.
4049 					 * new_pseudo_cumack will trigger a
4050 					 * cwnd update.
4051 					 * find_(rtx_)pseudo_cumack will
4052 					 * trigger search for the next
4053 					 * expected (rtx-)pseudo-cumack.
4054 					 */
4055 					tp1->whoTo->new_pseudo_cumack = 1;
4056 					tp1->whoTo->find_pseudo_cumack = 1;
4057 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4058 
4059 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4060 						/* sa_ignore NO_NULL_CHK */
4061 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4062 					}
4063 				}
4064 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4065 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4066 				}
4067 				if (tp1->rec.data.chunk_was_revoked) {
4068 					/* deflate the cwnd */
4069 					tp1->whoTo->cwnd -= tp1->book_size;
4070 					tp1->rec.data.chunk_was_revoked = 0;
4071 				}
4072 				tp1->sent = SCTP_DATAGRAM_ACKED;
4073 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4074 				if (tp1->data) {
4075 					/* sa_ignore NO_NULL_CHK */
4076 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4077 					sctp_m_freem(tp1->data);
4078 				}
4079 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4080 					sctp_log_sack(asoc->last_acked_seq,
4081 					    cumack,
4082 					    tp1->rec.data.TSN_seq,
4083 					    0,
4084 					    0,
4085 					    SCTP_LOG_FREE_SENT);
4086 				}
4087 				tp1->data = NULL;
4088 				asoc->sent_queue_cnt--;
4089 				sctp_free_a_chunk(stcb, tp1);
4090 				tp1 = tp2;
4091 			} else {
4092 				break;
4093 			}
4094 		}
4095 
4096 	}
4097 	/* sa_ignore NO_NULL_CHK */
4098 	if (stcb->sctp_socket) {
4099 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4100 		struct socket *so;
4101 
4102 #endif
4103 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4104 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4105 			/* sa_ignore NO_NULL_CHK */
4106 			sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
4107 		}
4108 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4109 		so = SCTP_INP_SO(stcb->sctp_ep);
4110 		atomic_add_int(&stcb->asoc.refcnt, 1);
4111 		SCTP_TCB_UNLOCK(stcb);
4112 		SCTP_SOCKET_LOCK(so, 1);
4113 		SCTP_TCB_LOCK(stcb);
4114 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4115 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4116 			/* assoc was freed while we were unlocked */
4117 			SCTP_SOCKET_UNLOCK(so, 1);
4118 			return;
4119 		}
4120 #endif
4121 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4122 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4123 		SCTP_SOCKET_UNLOCK(so, 1);
4124 #endif
4125 	} else {
4126 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4127 			sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
4128 		}
4129 	}
4130 
4131 	/* JRS - Use the congestion control given in the CC module */
4132 	if (asoc->last_acked_seq != cumack)
4133 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4134 
4135 	asoc->last_acked_seq = cumack;
4136 
4137 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4138 		/* nothing left in-flight */
4139 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4140 			net->flight_size = 0;
4141 			net->partial_bytes_acked = 0;
4142 		}
4143 		asoc->total_flight = 0;
4144 		asoc->total_flight_count = 0;
4145 	}
4146 	/* ECN Nonce updates */
4147 	if (asoc->ecn_nonce_allowed) {
4148 		if (asoc->nonce_sum_check) {
4149 			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
4150 				if (asoc->nonce_wait_for_ecne == 0) {
4151 					struct sctp_tmit_chunk *lchk;
4152 
4153 					lchk = TAILQ_FIRST(&asoc->send_queue);
4154 					asoc->nonce_wait_for_ecne = 1;
4155 					if (lchk) {
4156 						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4157 					} else {
4158 						asoc->nonce_wait_tsn = asoc->sending_seq;
4159 					}
4160 				} else {
4161 					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4162 					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4163 						/*
4164 						 * Misbehaving peer. We need
4165 						 * to react to this guy
4166 						 */
4167 						asoc->ecn_allowed = 0;
4168 						asoc->ecn_nonce_allowed = 0;
4169 					}
4170 				}
4171 			}
4172 		} else {
4173 			/* See if Resynchronization Possible */
4174 			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4175 				asoc->nonce_sum_check = 1;
4176 				/*
4177 				 * Now we must calculate what the base is.
4178 				 * We do this based on two things: we know
4179 				 * the totals for all the segments
4180 				 * gap-acked in the SACK (none). We also
4181 				 * know the SACK's nonce sum, it's in
4182 				 * nonce_sum_flag. So we can build a truth
4183 				 * table to back-calculate the new value of
4184 				 * asoc->nonce_sum_expect_base:
4185 				 *
4186 				 * (SACK-flag, Seg-Sums) -> Base:
4187 				 * (0,0) -> 0    (1,0) -> 1
4188 				 * (0,1) -> 1    (1,1) -> 0
4189 				 */
4190 				asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
4191 			}
4192 		}
4193 	}
4194 	/* RWND update */
4195 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4196 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4197 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4198 		/* SWS sender side engages */
4199 		asoc->peers_rwnd = 0;
4200 	}
4201 	if (asoc->peers_rwnd > old_rwnd) {
4202 		win_probe_recovery = 1;
4203 	}
4204 	/* Now assure a timer is running wherever data is queued */
4205 again:
4206 	j = 0;
4207 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4208 		int to_ticks;
4209 
4210 		if (win_probe_recovery && (net->window_probe)) {
4211 			win_probe_recovered = 1;
4212 			/*
4213 			 * Find first chunk that was used with window probe
4214 			 * and mark it for resend
4215 			 */
4216 			/* sa_ignore FREED_MEMORY */
4217 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4218 				if (tp1->window_probe) {
4219 					/* move back to data send queue */
4220 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
4221 					break;
4222 				}
4223 			}
4224 		}
4225 		if (net->RTO == 0) {
4226 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4227 		} else {
4228 			to_ticks = MSEC_TO_TICKS(net->RTO);
4229 		}
4230 		if (net->flight_size) {
4231 			j++;
4232 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4233 			    sctp_timeout_handler, &net->rxt_timer);
4234 			if (net->window_probe) {
4235 				net->window_probe = 0;
4236 			}
4237 		} else {
4238 			if (net->window_probe) {
4239 				/*
4240 				 * In window probes we must assure a timer
4241 				 * is still running there
4242 				 */
4243 				net->window_probe = 0;
4244 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4245 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4246 					    sctp_timeout_handler, &net->rxt_timer);
4247 				}
4248 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4249 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4250 				    stcb, net,
4251 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4252 			}
4253 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4254 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4255 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4256 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4257 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4258 				}
4259 			}
4260 		}
4261 	}
4262 	if ((j == 0) &&
4263 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4264 	    (asoc->sent_queue_retran_cnt == 0) &&
4265 	    (win_probe_recovered == 0) &&
4266 	    (done_once == 0)) {
4267 		/*
4268 		 * huh, this should not happen unless all packets are
4269 		 * PR-SCTP and marked to skip of course.
4270 		 */
4271 		if (sctp_fs_audit(asoc)) {
4272 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4273 				net->flight_size = 0;
4274 			}
4275 			asoc->total_flight = 0;
4276 			asoc->total_flight_count = 0;
4277 			asoc->sent_queue_retran_cnt = 0;
4278 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4279 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4280 					sctp_flight_size_increase(tp1);
4281 					sctp_total_flight_increase(stcb, tp1);
4282 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4283 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4284 				}
4285 			}
4286 		}
4287 		done_once = 1;
4288 		goto again;
4289 	}
4290 	/**********************************/
4291 	/* Now what about shutdown issues */
4292 	/**********************************/
4293 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4294 		/* nothing left on sendqueue.. consider done */
4295 		/* clean up */
4296 		if ((asoc->stream_queue_cnt == 1) &&
4297 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4298 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4299 		    (asoc->locked_on_sending)
4300 		    ) {
4301 			struct sctp_stream_queue_pending *sp;
4302 
4303 			/*
4304 			 * I may be in a state where we got all across.. but
4305 			 * cannot write more due to a shutdown... we abort
4306 			 * since the user did not indicate EOR in this case.
4307 			 * The sp will be cleaned during free of the asoc.
4308 			 */
4309 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4310 			    sctp_streamhead);
4311 			if ((sp) && (sp->length == 0)) {
4312 				/* Let cleanup code purge it */
4313 				if (sp->msg_is_complete) {
4314 					asoc->stream_queue_cnt--;
4315 				} else {
4316 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4317 					asoc->locked_on_sending = NULL;
4318 					asoc->stream_queue_cnt--;
4319 				}
4320 			}
4321 		}
4322 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4323 		    (asoc->stream_queue_cnt == 0)) {
4324 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4325 				/* Need to abort here */
4326 				struct mbuf *oper;
4327 
4328 		abort_out_now:
4329 				*abort_now = 1;
4330 				/* XXX */
4331 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4332 				    0, M_DONTWAIT, 1, MT_DATA);
4333 				if (oper) {
4334 					struct sctp_paramhdr *ph;
4335 					uint32_t *ippp;
4336 
4337 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4338 					    sizeof(uint32_t);
4339 					ph = mtod(oper, struct sctp_paramhdr *);
4340 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4341 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4342 					ippp = (uint32_t *) (ph + 1);
4343 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4344 				}
4345 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4346 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4347 			} else {
4348 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4349 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4350 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4351 				}
4352 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4353 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4354 				sctp_stop_timers_for_shutdown(stcb);
4355 				sctp_send_shutdown(stcb,
4356 				    stcb->asoc.primary_destination);
4357 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4358 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4359 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4360 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4361 			}
4362 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4363 		    (asoc->stream_queue_cnt == 0)) {
4364 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4365 				goto abort_out_now;
4366 			}
4367 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4368 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4369 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4370 			sctp_send_shutdown_ack(stcb,
4371 			    stcb->asoc.primary_destination);
4372 
4373 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4374 			    stcb->sctp_ep, stcb, asoc->primary_destination);
4375 		}
4376 	}
4377 	/*********************************************/
4378 	/* Here we perform PR-SCTP procedures        */
4379 	/* (section 4.2)                             */
4380 	/*********************************************/
4381 	/* C1. update advancedPeerAckPoint */
4382 	if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4383 		asoc->advanced_peer_ack_point = cumack;
4384 	}
4385 	/* PR-Sctp issues need to be addressed too */
4386 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4387 		struct sctp_tmit_chunk *lchk;
4388 		uint32_t old_adv_peer_ack_point;
4389 
4390 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4391 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4392 		/* C3. See if we need to send a Fwd-TSN */
4393 		if (compare_with_wrap(asoc->advanced_peer_ack_point, cumack,
4394 		    MAX_TSN)) {
4395 			/*
4396 			 * ISSUE with ECN, see FWD-TSN processing for notes
4397 			 * on issues that will occur when the ECN NONCE
4398 			 * stuff is put into SCTP for cross checking.
4399 			 */
4400 			if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
4401 			    MAX_TSN)) {
4402 				send_forward_tsn(stcb, asoc);
4403 				/*
4404 				 * ECN Nonce: Disable Nonce Sum check when
4405 				 * FWD TSN is sent and store resync tsn
4406 				 */
4407 				asoc->nonce_sum_check = 0;
4408 				asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
4409 			} else if (lchk) {
4410 				/* try to FR fwd-tsn's that get lost too */
4411 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4412 					send_forward_tsn(stcb, asoc);
4413 				}
4414 			}
4415 		}
4416 		if (lchk) {
4417 			/* Assure a timer is up */
4418 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4419 			    stcb->sctp_ep, stcb, lchk->whoTo);
4420 		}
4421 	}
4422 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4423 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4424 		    rwnd,
4425 		    stcb->asoc.peers_rwnd,
4426 		    stcb->asoc.total_flight,
4427 		    stcb->asoc.total_output_queue_size);
4428 	}
4429 }
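
/*
 * Minimal sketch of the peer-rwnd bookkeeping used in both SACK paths,
 * illustrative only and #if 0'd: the advertised window is reduced by
 * what is still in flight plus a per-chunk overhead allowance, and it
 * collapses to zero below the sender-side SWS threshold.  The helper
 * is hypothetical; the real code uses sctp_sbspace_sub() inline.
 */
#if 0
static uint32_t
sack_peers_rwnd(uint32_t a_rwnd, uint32_t total_flight,
    uint32_t flight_count, uint32_t chunk_oh, uint32_t sws_sender)
{
	uint32_t used, rwnd;

	used = total_flight + flight_count * chunk_oh;
	rwnd = (a_rwnd > used) ? (a_rwnd - used) : 0;
	return ((rwnd < sws_sender) ? 0 : rwnd);	/* SWS sender side engages */
}
#endif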
4430 
4431 void
4432 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4433     struct sctp_tcb *stcb, struct sctp_nets *net_from,
4434     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4435     int *abort_now, uint8_t flags,
4436     uint32_t cum_ack, uint32_t rwnd)
4437 {
4438 	struct sctp_association *asoc;
4439 	struct sctp_tmit_chunk *tp1, *tp2;
4440 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4441 	uint32_t sav_cum_ack;
4442 	uint16_t wake_him = 0;
4443 	uint32_t send_s = 0;
4444 	long j;
4445 	int accum_moved = 0;
4446 	int will_exit_fast_recovery = 0;
4447 	uint32_t a_rwnd, old_rwnd;
4448 	int win_probe_recovery = 0;
4449 	int win_probe_recovered = 0;
4450 	struct sctp_nets *net = NULL;
4451 	int nonce_sum_flag, ecn_seg_sums = 0;
4452 	int done_once;
4453 	uint8_t reneged_all = 0;
4454 	uint8_t cmt_dac_flag;
4455 
4456 	/*
4457 	 * we take any chance we can to service our queues since we cannot
4458 	 * get awoken when the socket is read from :<
4459 	 */
4460 	/*
4461 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4462 	 * old sack, if so discard. 2) If there is nothing left in the send
4463 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4464 	 * too, update any rwnd change and verify no timers are running.
4465 	 * Then return. 3) Process any new consecutive data i.e. cum-ack
4466 	 * moved process these first and note that it moved. 4) Process any
4467 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4468 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4469 	 * sync up flightsizes and things, stop all timers and also check
4470 	 * for shutdown_pending state. If so then go ahead and send off the
4471 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4472 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4473 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4474 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4475 	 * if in shutdown_recv state.
4476 	 */
4477 	SCTP_TCB_LOCK_ASSERT(stcb);
4478 	/* CMT DAC algo */
4479 	this_sack_lowest_newack = 0;
4480 	j = 0;
4481 	SCTP_STAT_INCR(sctps_slowpath_sack);
4482 	last_tsn = cum_ack;
4483 	nonce_sum_flag = flags & SCTP_SACK_NONCE_SUM;
4484 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4485 #ifdef SCTP_ASOCLOG_OF_TSNS
4486 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4487 	stcb->asoc.cumack_log_at++;
4488 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4489 		stcb->asoc.cumack_log_at = 0;
4490 	}
4491 #endif
4492 	a_rwnd = rwnd;
4493 
4494 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4495 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4496 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4497 	}
4498 	old_rwnd = stcb->asoc.peers_rwnd;
4499 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4500 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4501 		    stcb->asoc.overall_error_count,
4502 		    0,
4503 		    SCTP_FROM_SCTP_INDATA,
4504 		    __LINE__);
4505 	}
4506 	stcb->asoc.overall_error_count = 0;
4507 	asoc = &stcb->asoc;
4508 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4509 		sctp_log_sack(asoc->last_acked_seq,
4510 		    cum_ack,
4511 		    0,
4512 		    num_seg,
4513 		    num_dup,
4514 		    SCTP_LOG_NEW_SACK);
4515 	}
4516 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
4517 		uint16_t i;
4518 		uint32_t *dupdata, dblock;
4519 
4520 		for (i = 0; i < num_dup; i++) {
4521 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4522 			    sizeof(uint32_t), (uint8_t *) & dblock);
4523 			if (dupdata == NULL) {
4524 				break;
4525 			}
4526 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4527 		}
4528 	}
4529 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4530 		/* reality check */
4531 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4532 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4533 			    sctpchunk_listhead);
4534 			send_s = tp1->rec.data.TSN_seq + 1;
4535 		} else {
4536 			tp1 = NULL;
4537 			send_s = asoc->sending_seq;
4538 		}
4539 		if (cum_ack == send_s ||
4540 		    compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
4541 			struct mbuf *oper;
4542 
4543 			/*
4544 			 * no way, we have not even sent this TSN out yet.
4545 			 * Peer is hopelessly messed up with us.
4546 			 */
4547 			printf("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4548 			    cum_ack, send_s);
4549 			if (tp1) {
4550 				printf("Got send_s from tsn:%x + 1 of tp1:%p\n",
4551 				    tp1->rec.data.TSN_seq, tp1);
4552 			}
4553 	hopeless_peer:
4554 			*abort_now = 1;
4555 			/* XXX */
4556 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4557 			    0, M_DONTWAIT, 1, MT_DATA);
4558 			if (oper) {
4559 				struct sctp_paramhdr *ph;
4560 				uint32_t *ippp;
4561 
4562 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4563 				    sizeof(uint32_t);
4564 				ph = mtod(oper, struct sctp_paramhdr *);
4565 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4566 				ph->param_length = htons(SCTP_BUF_LEN(oper));
4567 				ippp = (uint32_t *) (ph + 1);
4568 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4569 			}
4570 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4571 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4572 			return;
4573 		}
4574 	}
4575 	/**********************/
4576 	/* 1) check the range */
4577 	/**********************/
4578 	if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
4579 		/* acking something behind */
4580 		return;
4581 	}
4582 	sav_cum_ack = asoc->last_acked_seq;
4583 
4584 	/* update the Rwnd of the peer */
4585 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4586 	    TAILQ_EMPTY(&asoc->send_queue) &&
4587 	    (asoc->stream_queue_cnt == 0)) {
4588 		/* nothing left on send/sent and strmq */
4589 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4590 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4591 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4592 		}
4593 		asoc->peers_rwnd = a_rwnd;
4594 		if (asoc->sent_queue_retran_cnt) {
4595 			asoc->sent_queue_retran_cnt = 0;
4596 		}
4597 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4598 			/* SWS sender side engages */
4599 			asoc->peers_rwnd = 0;
4600 		}
4601 		/* stop any timers */
4602 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4603 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4604 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4605 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4606 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4607 					SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
4608 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4609 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4610 				}
4611 			}
4612 			net->partial_bytes_acked = 0;
4613 			net->flight_size = 0;
4614 		}
4615 		asoc->total_flight = 0;
4616 		asoc->total_flight_count = 0;
4617 		return;
4618 	}
4619 	/*
4620 	 * We init net_ack and net_ack2 to 0. These are used to track 2
4621 	 * things: the total byte count acked is tracked in net_ack AND
4622 	 * net_ack2 is used to track the total bytes acked that are
4623 	 * unambiguous and were never retransmitted. We track these on a per
4624 	 * destination address basis.
4625 	 */
4626 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4627 		net->prev_cwnd = net->cwnd;
4628 		net->net_ack = 0;
4629 		net->net_ack2 = 0;
4630 
4631 		/*
4632 		 * CMT: Reset CUC and Fast recovery algo variables before
4633 		 * SACK processing
4634 		 */
4635 		net->new_pseudo_cumack = 0;
4636 		net->will_exit_fast_recovery = 0;
4637 	}
4638 	/* process the new consecutive TSN first */
4639 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
4640 	while (tp1) {
4641 		if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
4642 		    MAX_TSN) ||
4643 		    last_tsn == tp1->rec.data.TSN_seq) {
4644 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4645 				/*
4646 				 * ECN Nonce: Add the nonce to the sender's
4647 				 * nonce sum
4648 				 */
4649 				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4650 				accum_moved = 1;
4651 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4652 					/*
4653 					 * If it is less than ACKED, it is
4654 					 * now no longer in flight. Higher
4655 					 * values may occur during marking
4656 					 */
4657 					if ((tp1->whoTo->dest_state &
4658 					    SCTP_ADDR_UNCONFIRMED) &&
4659 					    (tp1->snd_count < 2)) {
4660 						/*
4661 						 * If there was no retran
4662 						 * and the address is
4663 						 * un-confirmed and we sent
4664 						 * there and are now
4665 						 * sacked, it's confirmed;
4666 						 * mark it so.
4667 						 */
4668 						tp1->whoTo->dest_state &=
4669 						    ~SCTP_ADDR_UNCONFIRMED;
4670 					}
4671 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4672 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4673 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4674 							    tp1->whoTo->flight_size,
4675 							    tp1->book_size,
4676 							    (uintptr_t) tp1->whoTo,
4677 							    tp1->rec.data.TSN_seq);
4678 						}
4679 						sctp_flight_size_decrease(tp1);
4680 						sctp_total_flight_decrease(stcb, tp1);
4681 					}
4682 					tp1->whoTo->net_ack += tp1->send_size;
4683 
4684 					/* CMT SFR and DAC algos */
4685 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4686 					tp1->whoTo->saw_newack = 1;
4687 
4688 					if (tp1->snd_count < 2) {
4689 						/*
4690 						 * True non-retransmitted
4691 						 * chunk
4692 						 */
4693 						tp1->whoTo->net_ack2 +=
4694 						    tp1->send_size;
4695 
4696 						/* update RTO too? */
4697 						if (tp1->do_rtt) {
4698 							tp1->whoTo->RTO =
4699 							    sctp_calculate_rto(stcb,
4700 							    asoc, tp1->whoTo,
4701 							    &tp1->sent_rcv_time,
4702 							    sctp_align_safe_nocopy);
4703 							tp1->do_rtt = 0;
4704 						}
4705 					}
4706 					/*
4707 					 * CMT: CUCv2 algorithm. From the
4708 					 * cumack'd TSNs, for each TSN being
4709 					 * acked for the first time, set the
4710 					 * following variables for the
4711 					 * corresp destination.
4712 					 * new_pseudo_cumack will trigger a
4713 					 * cwnd update.
4714 					 * find_(rtx_)pseudo_cumack will
4715 					 * trigger search for the next
4716 					 * expected (rtx-)pseudo-cumack.
4717 					 */
4718 					tp1->whoTo->new_pseudo_cumack = 1;
4719 					tp1->whoTo->find_pseudo_cumack = 1;
4720 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4721 
4722 
4723 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4724 						sctp_log_sack(asoc->last_acked_seq,
4725 						    cum_ack,
4726 						    tp1->rec.data.TSN_seq,
4727 						    0,
4728 						    0,
4729 						    SCTP_LOG_TSN_ACKED);
4730 					}
4731 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4732 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4733 					}
4734 				}
4735 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4736 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4737 #ifdef SCTP_AUDITING_ENABLED
4738 					sctp_audit_log(0xB3,
4739 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4740 #endif
4741 				}
4742 				if (tp1->rec.data.chunk_was_revoked) {
4743 					/* deflate the cwnd */
4744 					tp1->whoTo->cwnd -= tp1->book_size;
4745 					tp1->rec.data.chunk_was_revoked = 0;
4746 				}
4747 				tp1->sent = SCTP_DATAGRAM_ACKED;
4748 			}
4749 		} else {
4750 			break;
4751 		}
4752 		tp1 = TAILQ_NEXT(tp1, sctp_next);
4753 	}
4754 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4755 	/* always set this up to cum-ack */
4756 	asoc->this_sack_highest_gap = last_tsn;
4757 
4758 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4759 
4760 		/*
4761 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4762 		 * to be greater than the cumack. Also reset saw_newack to 0
4763 		 * for all dests.
4764 		 */
4765 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4766 			net->saw_newack = 0;
4767 			net->this_sack_highest_newack = last_tsn;
4768 		}
4769 
4770 		/*
4771 		 * this_sack_highest_gap will increase while handling NEW
4772 		 * segments; this_sack_highest_newack will increase while
4773 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4774 		 * used for CMT DAC algo. saw_newack will also change.
4775 		 */
4776 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4777 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4778 		    num_seg, num_nr_seg, &ecn_seg_sums)) {
4779 			wake_him++;
4780 		}
4781 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4782 			/*
4783 			 * validate the biggest_tsn_acked in the gap acks if
4784 			 * strict adherence is wanted.
4785 			 */
4786 			if ((biggest_tsn_acked == send_s) ||
4787 			    (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
4788 				/*
4789 				 * peer is either confused or we are under
4790 				 * attack. We must abort.
4791 				 */
4792 				printf("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4793 				    biggest_tsn_acked,
4794 				    send_s);
4795 
4796 				goto hopeless_peer;
4797 			}
4798 		}
4799 	}
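	/*
	 * (A sketch of the serial-number check above, assuming
	 * compare_with_wrap(a, b, MAX_TSN) is nonzero when a is "newer"
	 * than b modulo 2^32: with send_s = 0x00000003 just past a wrap,
	 * a gap-ack of 0xfffffffe is still serially below send_s and
	 * passes, while 0x00000003 (a TSN never sent) or 0x00000004
	 * trips the check and we abort via hopeless_peer.)
	 */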
4800 	/*************************************************************/
4801 	/* cancel T3-send timers if cum-ack (or pseudo-cumack) moved */
4802 	/*************************************************************/
4803 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
4804 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4805 			if (net->new_pseudo_cumack)
4806 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4807 				    stcb, net,
4808 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4809 
4810 		}
4811 	} else {
4812 		if (accum_moved) {
4813 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4814 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4815 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4816 			}
4817 		}
4818 	}
4819 	/********************************************/
4820 	/* drop the acked chunks from the sendqueue */
4821 	/********************************************/
4822 	asoc->last_acked_seq = cum_ack;
4823 
4824 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
4825 	if (tp1 == NULL)
4826 		goto done_with_it;
4827 	do {
4828 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
4829 		    MAX_TSN)) {
4830 			break;
4831 		}
4832 		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4833 			/* no more sent on list */
4834 			printf("Warning, tp1->sent == %d and it's now acked?\n",
4835 			    tp1->sent);
4836 		}
4837 		tp2 = TAILQ_NEXT(tp1, sctp_next);
4838 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4839 		if (tp1->pr_sctp_on) {
4840 			if (asoc->pr_sctp_cnt != 0)
4841 				asoc->pr_sctp_cnt--;
4842 		}
4843 		if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
4844 		    (asoc->total_flight > 0)) {
4845 #ifdef INVARIANTS
4846 			panic("Warning flight size is positive and should be 0");
4847 #else
4848 			SCTP_PRINTF("Warning: flight size should be 0 but is %d\n",
4849 			    asoc->total_flight);
4850 #endif
4851 			asoc->total_flight = 0;
4852 		}
4853 		if (tp1->data) {
4854 			/* sa_ignore NO_NULL_CHK */
4855 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4856 			sctp_m_freem(tp1->data);
4857 			if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4858 				asoc->sent_queue_cnt_removeable--;
4859 			}
4860 		}
4861 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4862 			sctp_log_sack(asoc->last_acked_seq,
4863 			    cum_ack,
4864 			    tp1->rec.data.TSN_seq,
4865 			    0,
4866 			    0,
4867 			    SCTP_LOG_FREE_SENT);
4868 		}
4869 		tp1->data = NULL;
4870 		asoc->sent_queue_cnt--;
4871 		sctp_free_a_chunk(stcb, tp1);
4872 		wake_him++;
4873 		tp1 = tp2;
4874 	} while (tp1 != NULL);
4875 
4876 done_with_it:
4877 	/* sa_ignore NO_NULL_CHK */
4878 	if ((wake_him) && (stcb->sctp_socket)) {
4879 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4880 		struct socket *so;
4881 
4882 #endif
4883 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4884 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4885 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
4886 		}
4887 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4888 		so = SCTP_INP_SO(stcb->sctp_ep);
4889 		atomic_add_int(&stcb->asoc.refcnt, 1);
4890 		SCTP_TCB_UNLOCK(stcb);
4891 		SCTP_SOCKET_LOCK(so, 1);
4892 		SCTP_TCB_LOCK(stcb);
4893 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4894 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4895 			/* assoc was freed while we were unlocked */
4896 			SCTP_SOCKET_UNLOCK(so, 1);
4897 			return;
4898 		}
4899 #endif
4900 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4901 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4902 		SCTP_SOCKET_UNLOCK(so, 1);
4903 #endif
4904 	} else {
4905 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4906 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
4907 		}
4908 	}
4909 
4910 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4911 		if (compare_with_wrap(asoc->last_acked_seq,
4912 		    asoc->fast_recovery_tsn, MAX_TSN) ||
4913 		    asoc->last_acked_seq == asoc->fast_recovery_tsn) {
4914 			/* Setup so we will exit RFC2582 fast recovery */
4915 			will_exit_fast_recovery = 1;
4916 		}
4917 	}
4918 	/*
4919 	 * Check for revoked fragments:
4920 	 *
4921 	 * - If the previous SACK had no frags, nothing can be revoked.
4922 	 * - If it had frags and we now have frags (num_seg > 0), call
4923 	 *   sctp_check_for_revoked() to tell if the peer revoked some.
4924 	 * - If it had frags and we now have NONE, the peer revoked all
4925 	 *   previously ACKED fragments.
4926 	 */
4927 
4928 	if (num_seg)
4929 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4930 	else if (asoc->saw_sack_with_frags) {
4931 		int cnt_revoked = 0;
4932 
4933 		tp1 = TAILQ_FIRST(&asoc->sent_queue);
4934 		if (tp1 != NULL) {
4935 			/* Peer revoked all datagrams marked or acked */
4936 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4937 				if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4938 					tp1->sent = SCTP_DATAGRAM_SENT;
4939 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4940 						sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4941 						    tp1->whoTo->flight_size,
4942 						    tp1->book_size,
4943 						    (uintptr_t) tp1->whoTo,
4944 						    tp1->rec.data.TSN_seq);
4945 					}
4946 					sctp_flight_size_increase(tp1);
4947 					sctp_total_flight_increase(stcb, tp1);
4948 					tp1->rec.data.chunk_was_revoked = 1;
4949 					/*
4950 					 * To ensure that this increase in
4951 					 * flightsize, which is artificial,
4952 					 * does not throttle the sender, we
4953 					 * also increase the cwnd
4954 					 * artificially.
4955 					 */
4956 					tp1->whoTo->cwnd += tp1->book_size;
4957 					cnt_revoked++;
4958 				}
4959 			}
4960 			if (cnt_revoked) {
4961 				reneged_all = 1;
4962 			}
4963 		}
4964 		asoc->saw_sack_with_frags = 0;
4965 	}
4966 	if (num_seg || num_nr_seg)
4967 		asoc->saw_sack_with_frags = 1;
4968 	else
4969 		asoc->saw_sack_with_frags = 0;
4970 
4971 	/* JRS - Use the congestion control given in the CC module */
4972 	asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4973 
4974 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4975 		/* nothing left in-flight */
4976 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4977 			/* stop all timers */
4978 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4979 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4980 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4981 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4982 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4983 				}
4984 			}
4985 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4986 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4987 			net->flight_size = 0;
4988 			net->partial_bytes_acked = 0;
4989 		}
4990 		asoc->total_flight = 0;
4991 		asoc->total_flight_count = 0;
4992 	}
4993 	/**********************************/
4994 	/* Now what about shutdown issues */
4995 	/**********************************/
4996 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4997 		/* nothing left on sendqueue.. consider done */
4998 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4999 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5000 			    asoc->peers_rwnd, 0, 0, a_rwnd);
5001 		}
5002 		asoc->peers_rwnd = a_rwnd;
5003 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5004 			/* SWS sender side engages */
5005 			asoc->peers_rwnd = 0;
5006 		}
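		/*
		 * (SWS here is silly window syndrome avoidance: a peer
		 * rwnd below sctp_sws_sender is treated as zero so we do
		 * not dribble tiny chunks into a nearly closed window.)
		 */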
5007 		/* clean up */
5008 		if ((asoc->stream_queue_cnt == 1) &&
5009 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5010 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5011 		    (asoc->locked_on_sending)
5012 		    ) {
5013 			struct sctp_stream_queue_pending *sp;
5014 
5015 			/*
5016 			 * We may be in a state where everything got across but
5017 			 * we cannot write more due to a shutdown; we abort,
5018 			 * since the user did not indicate EOR in this case.
5019 			 */
5020 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
5021 			    sctp_streamhead);
5022 			if ((sp) && (sp->length == 0)) {
5023 				asoc->locked_on_sending = NULL;
5024 				if (sp->msg_is_complete) {
5025 					asoc->stream_queue_cnt--;
5026 				} else {
5027 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
5028 					asoc->stream_queue_cnt--;
5029 				}
5030 			}
5031 		}
5032 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5033 		    (asoc->stream_queue_cnt == 0)) {
5034 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5035 				/* Need to abort here */
5036 				struct mbuf *oper;
5037 
5038 		abort_out_now:
5039 				*abort_now = 1;
5040 				/* XXX */
5041 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
5042 				    0, M_DONTWAIT, 1, MT_DATA);
5043 				if (oper) {
5044 					struct sctp_paramhdr *ph;
5045 					uint32_t *ippp;
5046 
5047 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5048 					    sizeof(uint32_t);
5049 					ph = mtod(oper, struct sctp_paramhdr *);
5050 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
5051 					ph->param_length = htons(SCTP_BUF_LEN(oper));
5052 					ippp = (uint32_t *) (ph + 1);
5053 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
5054 				}
5055 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
5056 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
5057 				return;
5058 			} else {
5059 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5060 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5061 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5062 				}
5063 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5064 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5065 				sctp_stop_timers_for_shutdown(stcb);
5066 				sctp_send_shutdown(stcb,
5067 				    stcb->asoc.primary_destination);
5068 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5069 				    stcb->sctp_ep, stcb, asoc->primary_destination);
5070 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5071 				    stcb->sctp_ep, stcb, asoc->primary_destination);
5072 			}
5073 			return;
5074 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5075 		    (asoc->stream_queue_cnt == 0)) {
5076 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5077 				goto abort_out_now;
5078 			}
5079 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5080 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5081 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5082 			sctp_send_shutdown_ack(stcb,
5083 			    stcb->asoc.primary_destination);
5084 
5085 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5086 			    stcb->sctp_ep, stcb, asoc->primary_destination);
5087 			return;
5088 		}
5089 	}
5090 	/*
5091 	 * Now here we are going to recycle net_ack for a different use...
5092 	 * HEADS UP.
5093 	 */
5094 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5095 		net->net_ack = 0;
5096 	}
5097 
5098 	/*
5099 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5100 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5101 	 * automatically ensure that.
5102 	 */
5103 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
5104 		this_sack_lowest_newack = cum_ack;
5105 	}
5106 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5107 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5108 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5109 	}
5110 	/* JRS - Use the congestion control given in the CC module */
5111 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5112 
5113 	/******************************************************************
5114 	 *  Here we do the stuff with ECN Nonce checking.
5115 	 *  We basically check to see if the nonce sum flag was incorrect
5116 	 *  or if resynchronization needs to be done. Also, if we catch a
5117 	 *  misbehaving receiver, we disable ECN and the nonce for it.
5118 	 ******************************************************************/
5119 
5120 	if (asoc->ecn_nonce_allowed) {
5121 		if (asoc->nonce_sum_check) {
5122 			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
5123 				if (asoc->nonce_wait_for_ecne == 0) {
5124 					struct sctp_tmit_chunk *lchk;
5125 
5126 					lchk = TAILQ_FIRST(&asoc->send_queue);
5127 					asoc->nonce_wait_for_ecne = 1;
5128 					if (lchk) {
5129 						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
5130 					} else {
5131 						asoc->nonce_wait_tsn = asoc->sending_seq;
5132 					}
5133 				} else {
5134 					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
5135 					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
5136 						/*
5137 						 * Misbehaving peer. We need
5138 						 * to react to this guy
5139 						 */
5140 						asoc->ecn_allowed = 0;
5141 						asoc->ecn_nonce_allowed = 0;
5142 					}
5143 				}
5144 			}
5145 		} else {
5146 			/* See if Resynchronization Possible */
5147 			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
5148 				asoc->nonce_sum_check = 1;
5149 				/*
5150 				 * Now we must calculate what the base is.
5151 				 * We do this based on two things: we know
5152 				 * the totals for all segments gap-acked in
5153 				 * the SACK (ecn_seg_sums), and we know the
5154 				 * SACK's nonce sum (nonce_sum_flag). So we
5155 				 * can build a truth table to back-calculate
5156 				 * the new asoc->nonce_sum_expect_base:
5157 				 *
5158 				 * SACK-flag  Seg-Sums  Base
5159 				 *     0         0       0
5160 				 *     1         0       1
5161 				 *     0         1       1
5162 				 *     1         1       0
5163 				 */
5164 				asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
5165 			}
5166 		}
5167 	}
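	/*
	 * (Worked example of the back-calculation above: if the SACK's
	 * nonce_sum_flag is 1 and ecn_seg_sums is 0, the new
	 * nonce_sum_expect_base is (0 ^ 1) & SCTP_SACK_NONCE_SUM = 1,
	 * the only base consistent with that flag/sum pair.)
	 */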
5168 	/* Now are we exiting loss recovery ? */
5169 	if (will_exit_fast_recovery) {
5170 		/* Ok, we must exit fast recovery */
5171 		asoc->fast_retran_loss_recovery = 0;
5172 	}
5173 	if ((asoc->sat_t3_loss_recovery) &&
5174 	    ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
5175 	    MAX_TSN) ||
5176 	    (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
5177 		/* end satellite t3 loss recovery */
5178 		asoc->sat_t3_loss_recovery = 0;
5179 	}
5180 	/*
5181 	 * CMT Fast recovery
5182 	 */
5183 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5184 		if (net->will_exit_fast_recovery) {
5185 			/* Ok, we must exit fast recovery */
5186 			net->fast_retran_loss_recovery = 0;
5187 		}
5188 	}
5189 
5190 	/* Adjust and set the new rwnd value */
5191 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5192 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5193 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5194 	}
5195 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5196 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5197 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5198 		/* SWS sender side engages */
5199 		asoc->peers_rwnd = 0;
5200 	}
5201 	if (asoc->peers_rwnd > old_rwnd) {
5202 		win_probe_recovery = 1;
5203 	}
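	/*
	 * (A hedged numeric sketch of the update above: with a_rwnd =
	 * 64000, total_flight = 8000, total_flight_count = 4 and an
	 * assumed sctp_peer_chunk_oh of 256, peers_rwnd becomes
	 * 64000 - (8000 + 4 * 256) = 54976; a result below
	 * sctp_sws_sender would then be clamped to 0.)
	 */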
5204 	/*
5205 	 * Now we must set up so we have a timer up for anyone with
5206 	 * outstanding data.
5207 	 */
5208 	done_once = 0;
5209 again:
5210 	j = 0;
5211 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5212 		if (win_probe_recovery && (net->window_probe)) {
5213 			win_probe_recovered = 1;
5214 			/*-
5215 			 * Find first chunk that was used with
5216 			 * window probe and clear the event. Put
5217 			 * it back into the send queue as if it has
5218 			 * not been sent.
5219 			 */
5220 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5221 				if (tp1->window_probe) {
5222 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
5223 					break;
5224 				}
5225 			}
5226 		}
5227 		if (net->flight_size) {
5228 			j++;
5229 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5230 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5231 				    stcb->sctp_ep, stcb, net);
5232 			}
5233 			if (net->window_probe) {
5234 				net->window_probe = 0;
5235 			}
5236 		} else {
5237 			if (net->window_probe) {
5238 				/*
5239 				 * In window probes we must assure a timer
5240 				 * is still running there
5241 				 */
5242 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5243 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5244 					    stcb->sctp_ep, stcb, net);
5245 
5246 				}
5247 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5248 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5249 				    stcb, net,
5250 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5251 			}
5252 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5253 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5254 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5255 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5256 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
5257 				}
5258 			}
5259 		}
5260 	}
5261 	if ((j == 0) &&
5262 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5263 	    (asoc->sent_queue_retran_cnt == 0) &&
5264 	    (win_probe_recovered == 0) &&
5265 	    (done_once == 0)) {
5266 		/*
5267 		 * This should not happen unless all packets are PR-SCTP
5268 		 * and marked to skip; audit the flight bookkeeping below.
5269 		 */
5270 		if (sctp_fs_audit(asoc)) {
5271 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5272 				net->flight_size = 0;
5273 			}
5274 			asoc->total_flight = 0;
5275 			asoc->total_flight_count = 0;
5276 			asoc->sent_queue_retran_cnt = 0;
5277 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5278 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5279 					sctp_flight_size_increase(tp1);
5280 					sctp_total_flight_increase(stcb, tp1);
5281 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5282 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5283 				}
5284 			}
5285 		}
5286 		done_once = 1;
5287 		goto again;
5288 	}
5289 	/*********************************************/
5290 	/* Here we perform PR-SCTP procedures        */
5291 	/* (section 4.2)                             */
5292 	/*********************************************/
5293 	/* C1. update advancedPeerAckPoint */
5294 	if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
5295 		asoc->advanced_peer_ack_point = cum_ack;
5296 	}
5297 	/* C2. try to further move advancedPeerAckPoint ahead */
5298 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5299 		struct sctp_tmit_chunk *lchk;
5300 		uint32_t old_adv_peer_ack_point;
5301 
5302 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5303 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5304 		/* C3. See if we need to send a Fwd-TSN */
5305 		if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
5306 		    MAX_TSN)) {
5307 			/*
5308 			 * ISSUE with ECN, see FWD-TSN processing for notes
5309 			 * on issues that will occur when the ECN NONCE
5310 			 * stuff is put into SCTP for cross checking.
5311 			 */
5312 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5313 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5314 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5315 				    old_adv_peer_ack_point);
5316 			}
5317 			if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
5318 			    MAX_TSN)) {
5319 
5320 				send_forward_tsn(stcb, asoc);
5321 				/*
5322 				 * ECN Nonce: Disable Nonce Sum check when
5323 				 * FWD TSN is sent and store resync tsn
5324 				 */
5325 				asoc->nonce_sum_check = 0;
5326 				asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
5327 			} else if (lchk) {
5328 				/* try to fast-retransmit FWD-TSNs that get lost too */
5329 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5330 					send_forward_tsn(stcb, asoc);
5331 				}
5332 			}
5333 		}
5334 		if (lchk) {
5335 			/* Assure a timer is up */
5336 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5337 			    stcb->sctp_ep, stcb, lchk->whoTo);
5338 		}
5339 	}
5340 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5341 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5342 		    a_rwnd,
5343 		    stcb->asoc.peers_rwnd,
5344 		    stcb->asoc.total_flight,
5345 		    stcb->asoc.total_output_queue_size);
5346 	}
5347 }
5348 
5349 void
5350 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5351     struct sctp_nets *netp, int *abort_flag)
5352 {
5353 	/* Copy cum-ack */
5354 	uint32_t cum_ack, a_rwnd;
5355 
5356 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5357 	/* Arrange so a_rwnd does NOT change */
5358 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
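	/*
	 * (Hedged reading: the express handler recomputes peers_rwnd
	 * roughly as a_rwnd minus what is still in flight, so passing
	 * peers_rwnd + total_flight leaves the peer's rwnd effectively
	 * unchanged by this SHUTDOWN-driven cum-ack.)
	 */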
5359 
5360 	/* Now call the express sack handling */
5361 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
5362 }
5363 
5364 static void
5365 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5366     struct sctp_stream_in *strmin)
5367 {
5368 	struct sctp_queued_to_read *ctl, *nctl;
5369 	struct sctp_association *asoc;
5370 	uint16_t tt;
5371 
5372 	asoc = &stcb->asoc;
5373 	tt = strmin->last_sequence_delivered;
5374 	/*
5375 	 * First deliver anything prior to and including the stream
5376 	 * sequence number that came in.
5377 	 */
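	/*
	 * (Illustration: if last_sequence_delivered is 7 and the inqueue
	 * holds SSNs 5, 7 and 9, this first pass hands up 5 and 7; SSN 9
	 * must wait for the strictly in-order pass below, which only
	 * advances on an exact match of the next expected SSN.)
	 */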
5378 	ctl = TAILQ_FIRST(&strmin->inqueue);
5379 	while (ctl) {
5380 		nctl = TAILQ_NEXT(ctl, next);
5381 		if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
5382 		    (tt == ctl->sinfo_ssn)) {
5383 			/* this is deliverable now */
5384 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5385 			/* subtract pending on streams */
5386 			asoc->size_on_all_streams -= ctl->length;
5387 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5388 			/* deliver it to at least the delivery-q */
5389 			if (stcb->sctp_socket) {
5390 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5391 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5392 				    ctl,
5393 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5394 			}
5395 		} else {
5396 			/* no more delivery now. */
5397 			break;
5398 		}
5399 		ctl = nctl;
5400 	}
5401 	/*
5402 	 * Now we must deliver things in the queue the normal way, if any
5403 	 * are now ready.
5404 	 */
5405 	tt = strmin->last_sequence_delivered + 1;
5406 	ctl = TAILQ_FIRST(&strmin->inqueue);
5407 	while (ctl) {
5408 		nctl = TAILQ_NEXT(ctl, next);
5409 		if (tt == ctl->sinfo_ssn) {
5410 			/* this is deliverable now */
5411 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5412 			/* subtract pending on streams */
5413 			asoc->size_on_all_streams -= ctl->length;
5414 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5415 			/* deliver it to at least the delivery-q */
5416 			strmin->last_sequence_delivered = ctl->sinfo_ssn;
5417 			if (stcb->sctp_socket) {
5418 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5419 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5420 				    ctl,
5421 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5422 
5423 			}
5424 			tt = strmin->last_sequence_delivered + 1;
5425 		} else {
5426 			break;
5427 		}
5428 		ctl = nctl;
5429 	}
5430 }
5431 
5432 static void
5433 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5434     struct sctp_association *asoc,
5435     uint16_t stream, uint16_t seq)
5436 {
5437 	struct sctp_tmit_chunk *chk, *at;
5438 
5439 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5440 		/* For each one on here see if we need to toss it */
5441 		/*
5442 		 * For now large messages held on the reasmqueue that are
5443 		 * complete will be tossed too. We could in theory do more
5444 		 * work to spin through and stop after dumping one msg aka
5445 		 * seeing the start of a new msg at the head, and call the
5446 		 * delivery function... to see if it can be delivered... But
5447 		 * for now we just dump everything on the queue.
5448 		 */
5449 		chk = TAILQ_FIRST(&asoc->reasmqueue);
5450 		while (chk) {
5451 			at = TAILQ_NEXT(chk, sctp_next);
5452 			/*
5453 			 * Do not toss it if on a different stream or marked
5454 			 * for unordered delivery in which case the stream
5455 			 * sequence number has no meaning.
5456 			 */
5457 			if ((chk->rec.data.stream_number != stream) ||
5458 			    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5459 				chk = at;
5460 				continue;
5461 			}
5462 			if (chk->rec.data.stream_seq == seq) {
5463 				/* It needs to be tossed */
5464 				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5465 				if (compare_with_wrap(chk->rec.data.TSN_seq,
5466 				    asoc->tsn_last_delivered, MAX_TSN)) {
5467 					asoc->tsn_last_delivered =
5468 					    chk->rec.data.TSN_seq;
5469 					asoc->str_of_pdapi =
5470 					    chk->rec.data.stream_number;
5471 					asoc->ssn_of_pdapi =
5472 					    chk->rec.data.stream_seq;
5473 					asoc->fragment_flags =
5474 					    chk->rec.data.rcv_flags;
5475 				}
5476 				asoc->size_on_reasm_queue -= chk->send_size;
5477 				sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5478 
5479 				/* Clear up any stream problem */
5480 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5481 				    SCTP_DATA_UNORDERED &&
5482 				    (compare_with_wrap(chk->rec.data.stream_seq,
5483 				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
5484 				    MAX_SEQ))) {
5485 					/*
5486 					 * We must bump forward this stream's
5487 					 * sequence number if the chunk being
5488 					 * skipped is not unordered. There is
5489 					 * a chance that if the peer does not
5490 					 * include the last fragment in its
5491 					 * FWD-TSN we WILL have a problem
5492 					 * here, since we would have a
5493 					 * partial chunk in the queue that
5494 					 * may not be deliverable. Also, if a
5495 					 * partial delivery API has started,
5496 					 * the user may get a partial chunk;
5497 					 * the next read would then return a
5498 					 * new chunk... really ugly, but I
5499 					 * see no way around it! Maybe a
5500 					 * notify??
5501 					 */
5502 					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5503 					    chk->rec.data.stream_seq;
5504 				}
5505 				if (chk->data) {
5506 					sctp_m_freem(chk->data);
5507 					chk->data = NULL;
5508 				}
5509 				sctp_free_a_chunk(stcb, chk);
5510 			} else if (compare_with_wrap(chk->rec.data.stream_seq, seq, MAX_SEQ)) {
5511 				/*
5512 				 * If the stream_seq is greater than the
5513 				 * purging one, we are done.
5514 				 */
5515 				break;
5516 			}
5517 			chk = at;
5518 		}
5519 	}
5520 }
5521 
5522 
5523 void
5524 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5525     struct sctp_forward_tsn_chunk *fwd,
5526     int *abort_flag, struct mbuf *m, int offset)
5527 {
5528 	/*
5529 	 * ISSUES that MUST be fixed for ECN! When we are the sender of the
5530 	 * forward TSN, when the SACK comes back that acknowledges the
5531 	 * FWD-TSN we must reset the NONCE sum to match correctly. This will
5532 	 * get quite tricky since we may have sent more intervening data
5533 	 * and must carefully account for what the SACK says on the nonce
5534 	 * and any gaps that are reported. This work will NOT be done here,
5535 	 * but I note it here since it is really related to PR-SCTP and
5536 	 * FWD-TSN's
5537 	 */
5538 
5539 	/* The pr-sctp fwd tsn */
5540 	/*
5541 	 * Here we will perform all the data receiver side steps for
5542 	 * processing FwdTSN, as required by the pr-sctp draft.
5543 	 * Assume we get FwdTSN(x):
5544 	 *
5545 	 * 1) update local cumTSN to x
5546 	 * 2) try to further advance cumTSN to x + others we have
5547 	 * 3) examine and update re-ordering queue on pr-in-streams
5548 	 * 4) clean up re-assembly queue
5549 	 * 5) send a SACK to report where we are
5550 	 */
5551 	struct sctp_association *asoc;
5552 	uint32_t new_cum_tsn, gap;
5553 	unsigned int i, fwd_sz, cumack_set_flag, m_size;
5554 	uint32_t str_seq;
5555 	struct sctp_stream_in *strm;
5556 	struct sctp_tmit_chunk *chk, *at;
5557 	struct sctp_queued_to_read *ctl, *sv;
5558 
5559 	cumack_set_flag = 0;
5560 	asoc = &stcb->asoc;
5561 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5562 		SCTPDBG(SCTP_DEBUG_INDATA1,
5563 		    "Bad size, too small fwd-tsn\n");
5564 		return;
5565 	}
5566 	m_size = (stcb->asoc.mapping_array_size << 3);
5567 	/*************************************************************/
5568 	/* 1. Here we update local cumTSN and shift the bitmap array */
5569 	/*************************************************************/
5570 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5571 
5572 	if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
5573 	    asoc->cumulative_tsn == new_cum_tsn) {
5574 		/* Already got there ... */
5575 		return;
5576 	}
5577 	/*
5578 	 * now we know the new TSN is more advanced, let's find the actual
5579 	 * gap
5580 	 */
5581 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5582 	asoc->cumulative_tsn = new_cum_tsn;
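	/*
	 * (Sketch, assuming SCTP_CALC_TSN_TO_GAP yields the serial
	 * distance new_cum_tsn - mapping_array_base_tsn modulo 2^32:
	 * with base 0xfffffffe and new cum-ack 0x00000003 the gap is 5,
	 * so TSN wrap-around is handled transparently.)
	 */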
5583 	if (gap >= m_size) {
5584 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5585 			struct mbuf *oper;
5586 
5587 			/*
5588 			 * Out of range (beyond even single-byte chunks in the
5589 			 * rwnd we give out). This must be an attacker.
5590 			 */
5591 			*abort_flag = 1;
5592 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5593 			    0, M_DONTWAIT, 1, MT_DATA);
5594 			if (oper) {
5595 				struct sctp_paramhdr *ph;
5596 				uint32_t *ippp;
5597 
5598 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5599 				    (sizeof(uint32_t) * 3);
5600 				ph = mtod(oper, struct sctp_paramhdr *);
5601 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5602 				ph->param_length = htons(SCTP_BUF_LEN(oper));
5603 				ippp = (uint32_t *) (ph + 1);
5604 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5605 				ippp++;
5606 				*ippp = asoc->highest_tsn_inside_map;
5607 				ippp++;
5608 				*ippp = new_cum_tsn;
5609 			}
5610 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5611 			sctp_abort_an_association(stcb->sctp_ep, stcb,
5612 			    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
5613 			return;
5614 		}
5615 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5616 
5617 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5618 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5619 		asoc->highest_tsn_inside_map = new_cum_tsn;
5620 
5621 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5622 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5623 
5624 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5625 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5626 		}
5627 		asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
5628 	} else {
5629 		SCTP_TCB_LOCK_ASSERT(stcb);
5630 		for (i = 0; i <= gap; i++) {
5631 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5632 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5633 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5634 				if (compare_with_wrap(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
5635 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5636 				}
5637 			}
5638 		}
5639 	}
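	/*
	 * (Note: the skipped TSNs were set in nr_mapping_array, the
	 * non-renegable map, so our SACKs will report them as received
	 * and we will never renege on data the FWD-TSN skipped past.)
	 */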
5640 	/*************************************************************/
5641 	/* 2. Clear up re-assembly queue                             */
5642 	/*************************************************************/
5643 	/*
5644 	 * First service it if pd-api is up, just in case we can progress it
5645 	 * forward
5646 	 */
5647 	if (asoc->fragmented_delivery_inprogress) {
5648 		sctp_service_reassembly(stcb, asoc);
5649 	}
5650 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5651 		/* For each one on here see if we need to toss it */
5652 		/*
5653 		 * For now large messages held on the reasmqueue that are
5654 		 * complete will be tossed too. We could in theory do more
5655 		 * work to spin through and stop after dumping one msg aka
5656 		 * seeing the start of a new msg at the head, and call the
5657 		 * delivery function... to see if it can be delivered... But
5658 		 * for now we just dump everything on the queue.
5659 		 */
5660 		chk = TAILQ_FIRST(&asoc->reasmqueue);
5661 		while (chk) {
5662 			at = TAILQ_NEXT(chk, sctp_next);
5663 			if ((compare_with_wrap(new_cum_tsn,
5664 			    chk->rec.data.TSN_seq, MAX_TSN)) ||
5665 			    (new_cum_tsn == chk->rec.data.TSN_seq)) {
5666 				/* It needs to be tossed */
5667 				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5668 				if (compare_with_wrap(chk->rec.data.TSN_seq,
5669 				    asoc->tsn_last_delivered, MAX_TSN)) {
5670 					asoc->tsn_last_delivered =
5671 					    chk->rec.data.TSN_seq;
5672 					asoc->str_of_pdapi =
5673 					    chk->rec.data.stream_number;
5674 					asoc->ssn_of_pdapi =
5675 					    chk->rec.data.stream_seq;
5676 					asoc->fragment_flags =
5677 					    chk->rec.data.rcv_flags;
5678 				}
5679 				asoc->size_on_reasm_queue -= chk->send_size;
5680 				sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5681 
5682 				/* Clear up any stream problem */
5683 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5684 				    SCTP_DATA_UNORDERED &&
5685 				    (compare_with_wrap(chk->rec.data.stream_seq,
5686 				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
5687 				    MAX_SEQ))) {
5688 					/*
5689 					 * We must bump forward this stream's
5690 					 * sequence number if the chunk being
5691 					 * skipped is not unordered. There is
5692 					 * a chance that if the peer does not
5693 					 * include the last fragment in its
5694 					 * FWD-TSN we WILL have a problem
5695 					 * here, since we would have a
5696 					 * partial chunk in the queue that
5697 					 * may not be deliverable. Also, if a
5698 					 * partial delivery API has started,
5699 					 * the user may get a partial chunk;
5700 					 * the next read would then return a
5701 					 * new chunk... really ugly, but I
5702 					 * see no way around it! Maybe a
5703 					 * notify??
5704 					 */
5705 					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5706 					    chk->rec.data.stream_seq;
5707 				}
5708 				if (chk->data) {
5709 					sctp_m_freem(chk->data);
5710 					chk->data = NULL;
5711 				}
5712 				sctp_free_a_chunk(stcb, chk);
5713 			} else {
5714 				/*
5715 				 * Ok we have gone beyond the end of the
5716 				 * fwd-tsn's mark.
5717 				 */
5718 				break;
5719 			}
5720 			chk = at;
5721 		}
5722 	}
5723 	/*******************************************************/
5724 	/* 3. Update the PR-stream re-ordering queues and fix  */
5725 	/* delivery issues as needed.                          */
5726 	/*******************************************************/
5727 	fwd_sz -= sizeof(*fwd);
5728 	if (m && fwd_sz) {
5729 		/* New method. */
5730 		unsigned int num_str;
5731 		struct sctp_strseq *stseq, strseqbuf;
5732 
5733 		offset += sizeof(*fwd);
5734 
5735 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5736 		num_str = fwd_sz / sizeof(struct sctp_strseq);
5737 		for (i = 0; i < num_str; i++) {
5738 			uint16_t st;
5739 
5740 			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5741 			    sizeof(struct sctp_strseq),
5742 			    (uint8_t *) & strseqbuf);
5743 			offset += sizeof(struct sctp_strseq);
5744 			if (stseq == NULL) {
5745 				break;
5746 			}
5747 			/* Convert */
5748 			st = ntohs(stseq->stream);
5749 			stseq->stream = st;
5750 			st = ntohs(stseq->sequence);
5751 			stseq->sequence = st;
5752 
5753 			/* now process */
5754 
5755 			/*
5756 			 * Ok we now look for the stream/seq on the read
5757 			 * queue where it's not all delivered. If we find it
5758 			 * we transmute the read entry into a PDI_ABORTED.
5759 			 */
5760 			if (stseq->stream >= asoc->streamincnt) {
5761 				/* screwed up streams, stop!  */
5762 				break;
5763 			}
5764 			if ((asoc->str_of_pdapi == stseq->stream) &&
5765 			    (asoc->ssn_of_pdapi == stseq->sequence)) {
5766 				/*
5767 				 * If this is the one we were partially
5768 				 * delivering now then we no longer are.
5769 				 * Note this will change with the reassembly
5770 				 * re-write.
5771 				 */
5772 				asoc->fragmented_delivery_inprogress = 0;
5773 			}
5774 			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5775 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5776 				if ((ctl->sinfo_stream == stseq->stream) &&
5777 				    (ctl->sinfo_ssn == stseq->sequence)) {
5778 					str_seq = (stseq->stream << 16) | stseq->sequence;
5779 					ctl->end_added = 1;
5780 					ctl->pdapi_aborted = 1;
5781 					sv = stcb->asoc.control_pdapi;
5782 					stcb->asoc.control_pdapi = ctl;
5783 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5784 					    stcb,
5785 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5786 					    (void *)&str_seq,
5787 					    SCTP_SO_NOT_LOCKED);
5788 					stcb->asoc.control_pdapi = sv;
5789 					break;
5790 				} else if ((ctl->sinfo_stream == stseq->stream) &&
5791 				    (compare_with_wrap(ctl->sinfo_ssn, stseq->sequence, MAX_SEQ))) {
5792 					/* We are past our victim SSN */
5793 					break;
5794 				}
5795 			}
5796 			strm = &asoc->strmin[stseq->stream];
5797 			if (compare_with_wrap(stseq->sequence,
5798 			    strm->last_sequence_delivered, MAX_SEQ)) {
5799 				/* Update the sequence number */
5800 				strm->last_sequence_delivered =
5801 				    stseq->sequence;
5802 			}
5803 			/* now kick the stream the new way */
5804 			/* sa_ignore NO_NULL_CHK */
5805 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5806 		}
5807 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5808 	}
5809 	/*
5810 	 * Now slide things forward.
5811 	 */
5812 	sctp_slide_mapping_arrays(stcb);
5813 
5814 	if (TAILQ_FIRST(&asoc->reasmqueue)) {
5815 		/* now let's kick out and check for more fragmented delivery */
5816 		/* sa_ignore NO_NULL_CHK */
5817 		sctp_deliver_reasm_check(stcb, &stcb->asoc);
5818 	}
5819 }
5820