xref: /freebsd/sys/netinet/sctp_indata.c (revision 721351876cd4d3a8a700f62d2061331fa951a488)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 
48 
49 /*
50  * NOTES: On the outbound side of things I need to check the sack timer to
51  * see if I should generate a sack into the chunk queue (if I have data to
52  * send, that is, and will be sending it) for bundling.
53  *
54  * The callback in sctp_usrreq.c will get called when the socket is read from.
55  * This will cause sctp_service_queues() to get called on the top entry in
56  * the list.
57  */
58 
59 void
60 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
61 {
62 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
63 }
64 
65 /* Calculate what the rwnd would be */
66 uint32_t
67 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
68 {
69 	uint32_t calc = 0;
70 
71 	/*
72 	 * This is really set wrong with respect to a 1-to-many socket, since
73 	 * the sb_cc is the count that everyone has put up. When we re-write
74 	 * sctp_soreceive we will fix this so that ONLY this association's
75 	 * data is taken into account.
76 	 */
77 	if (stcb->sctp_socket == NULL)
78 		return (calc);
79 
80 	if (stcb->asoc.sb_cc == 0 &&
81 	    asoc->size_on_reasm_queue == 0 &&
82 	    asoc->size_on_all_streams == 0) {
83 		/* Full rwnd granted */
84 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
85 		return (calc);
86 	}
87 	/* get actual space */
88 	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
89 
90 	/*
91 	 * take out what has NOT been put on the socket queue and what we
92 	 * still hold for putting up.
93 	 */
94 	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
95 	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);
96 
97 	if (calc == 0) {
98 		/* out of space */
99 		return (calc);
100 	}
101 	/* what is the overhead of all these rwnd's */
102 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
103 	/*
104 	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
105 	 * even if it is 0. SWS avoidance engaged.
106 	 */
107 	if (calc < stcb->asoc.my_rwnd_control_len) {
108 		calc = 1;
109 	}
110 	return (calc);
111 }
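
/*
 * Worked example (added commentary, not part of the original revision):
 * assume sctp_sbspace() reports 64000 bytes, 1000 bytes sit on the
 * reassembly queue, 3000 bytes sit on the stream queues, and
 * my_rwnd_control_len is 500. Then calc = 64000 - 1000 - 3000 = 60000,
 * and after the control overhead is taken out 59500 is advertised. Had
 * the subtractions driven calc below my_rwnd_control_len, the final
 * check would clamp the advertised window to 1 so that SWS avoidance
 * stays engaged.
 */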
112 
113 
114 
115 /*
116  * Build out our readq entry based on the incoming packet.
117  */
118 struct sctp_queued_to_read *
119 sctp_build_readq_entry(struct sctp_tcb *stcb,
120     struct sctp_nets *net,
121     uint32_t tsn, uint32_t ppid,
122     uint32_t context, uint16_t stream_no,
123     uint16_t stream_seq, uint8_t flags,
124     struct mbuf *dm)
125 {
126 	struct sctp_queued_to_read *read_queue_e = NULL;
127 
128 	sctp_alloc_a_readq(stcb, read_queue_e);
129 	if (read_queue_e == NULL) {
130 		goto failed_build;
131 	}
132 	read_queue_e->sinfo_stream = stream_no;
133 	read_queue_e->sinfo_ssn = stream_seq;
134 	read_queue_e->sinfo_flags = (flags << 8);
135 	read_queue_e->sinfo_ppid = ppid;
136 	read_queue_e->sinfo_context = stcb->asoc.context;
137 	read_queue_e->sinfo_timetolive = 0;
138 	read_queue_e->sinfo_tsn = tsn;
139 	read_queue_e->sinfo_cumtsn = tsn;
140 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
141 	read_queue_e->whoFrom = net;
142 	read_queue_e->length = 0;
143 	atomic_add_int(&net->ref_count, 1);
144 	read_queue_e->data = dm;
145 	read_queue_e->spec_flags = 0;
146 	read_queue_e->tail_mbuf = NULL;
147 	read_queue_e->aux_data = NULL;
148 	read_queue_e->stcb = stcb;
149 	read_queue_e->port_from = stcb->rport;
150 	read_queue_e->do_not_ref_stcb = 0;
151 	read_queue_e->end_added = 0;
152 	read_queue_e->some_taken = 0;
153 	read_queue_e->pdapi_aborted = 0;
154 failed_build:
155 	return (read_queue_e);
156 }
157 
158 
159 /*
160  * Build out our readq entry based on the incoming packet.
161  */
162 static struct sctp_queued_to_read *
163 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
164     struct sctp_tmit_chunk *chk)
165 {
166 	struct sctp_queued_to_read *read_queue_e = NULL;
167 
168 	sctp_alloc_a_readq(stcb, read_queue_e);
169 	if (read_queue_e == NULL) {
170 		goto failed_build;
171 	}
172 	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
173 	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
174 	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
175 	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
176 	read_queue_e->sinfo_context = stcb->asoc.context;
177 	read_queue_e->sinfo_timetolive = 0;
178 	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
179 	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
180 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
181 	read_queue_e->whoFrom = chk->whoTo;
182 	read_queue_e->aux_data = NULL;
183 	read_queue_e->length = 0;
184 	atomic_add_int(&chk->whoTo->ref_count, 1);
185 	read_queue_e->data = chk->data;
186 	read_queue_e->tail_mbuf = NULL;
187 	read_queue_e->stcb = stcb;
188 	read_queue_e->port_from = stcb->rport;
189 	read_queue_e->spec_flags = 0;
190 	read_queue_e->do_not_ref_stcb = 0;
191 	read_queue_e->end_added = 0;
192 	read_queue_e->some_taken = 0;
193 	read_queue_e->pdapi_aborted = 0;
194 failed_build:
195 	return (read_queue_e);
196 }
197 
198 
199 struct mbuf *
200 sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
201     struct sctp_sndrcvinfo *sinfo)
202 {
203 	struct sctp_sndrcvinfo *outinfo;
204 	struct cmsghdr *cmh;
205 	struct mbuf *ret;
206 	int len;
207 	int use_extended = 0;
208 
209 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
210 		/* user does not want the sndrcv ctl */
211 		return (NULL);
212 	}
213 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
214 		use_extended = 1;
215 		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
216 	} else {
217 		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
218 	}
219 
220 
221 	ret = sctp_get_mbuf_for_msg(len,
222 	    0, M_DONTWAIT, 1, MT_DATA);
223 
224 	if (ret == NULL) {
225 		/* No space */
226 		return (ret);
227 	}
228 	/* We need a CMSG header followed by the struct  */
229 	cmh = mtod(ret, struct cmsghdr *);
230 	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
231 	cmh->cmsg_level = IPPROTO_SCTP;
232 	if (use_extended) {
233 		cmh->cmsg_type = SCTP_EXTRCV;
234 		cmh->cmsg_len = len;
235 		memcpy(outinfo, sinfo, len);
236 	} else {
237 		cmh->cmsg_type = SCTP_SNDRCV;
238 		cmh->cmsg_len = len;
239 		*outinfo = *sinfo;
240 	}
241 	SCTP_BUF_LEN(ret) = cmh->cmsg_len;
242 	return (ret);
243 }
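
/*
 * Illustrative sketch (added commentary, not part of the original
 * revision): how a userland receiver of the control data built above
 * would walk it after recvmsg(). The mbuf carries a cmsghdr at level
 * IPPROTO_SCTP with type SCTP_SNDRCV (or SCTP_EXTRCV), followed by the
 * info structure at CMSG_DATA(). Guarded out since it is userland code
 * kept only as documentation.
 */
#if 0
static void
example_read_sndrcvinfo(struct msghdr *msg)
{
	struct cmsghdr *cmh;
	struct sctp_sndrcvinfo *sinfo;

	for (cmh = CMSG_FIRSTHDR(msg); cmh != NULL;
	    cmh = CMSG_NXTHDR(msg, cmh)) {
		if (cmh->cmsg_level == IPPROTO_SCTP &&
		    cmh->cmsg_type == SCTP_SNDRCV) {
			sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
			/* sinfo->sinfo_stream etc. are now usable */
		}
	}
}
#endif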
244 
245 
246 char *
247 sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
248     int *control_len,
249     struct sctp_sndrcvinfo *sinfo)
250 {
251 	struct sctp_sndrcvinfo *outinfo;
252 	struct cmsghdr *cmh;
253 	char *buf;
254 	int len;
255 	int use_extended = 0;
256 
257 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
258 		/* user does not want the sndrcv ctl */
259 		return (NULL);
260 	}
261 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
262 		use_extended = 1;
263 		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
264 	} else {
265 		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
266 	}
267 	SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
268 	if (buf == NULL) {
269 		/* No space */
270 		return (buf);
271 	}
272 	/* We need a CMSG header followed by the struct  */
273 	cmh = (struct cmsghdr *)buf;
274 	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
275 	cmh->cmsg_level = IPPROTO_SCTP;
276 	if (use_extended) {
277 		cmh->cmsg_type = SCTP_EXTRCV;
278 		cmh->cmsg_len = len;
279 		memcpy(outinfo, sinfo, len);
280 	} else {
281 		cmh->cmsg_type = SCTP_SNDRCV;
282 		cmh->cmsg_len = len;
283 		*outinfo = *sinfo;
284 	}
285 	*control_len = len;
286 	return (buf);
287 }
288 
289 
290 /*
291  * We are currently delivering from the reassembly queue. We must continue
292  * to deliver until we either: 1) run out of space, 2) run out of sequential
293  * TSN's, or 3) hit the SCTP_DATA_LAST_FRAG flag.
294  */
295 static void
296 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
297 {
298 	struct sctp_tmit_chunk *chk;
299 	uint16_t nxt_todel;
300 	uint16_t stream_no;
301 	int end = 0;
302 	int cntDel;
303 	struct sctp_queued_to_read *control, *ctl, *ctlat;
304 
305 	if (stcb == NULL)
306 		return;
307 
308 	cntDel = stream_no = 0;
309 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
310 	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
311 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
312 		/* socket above is long gone or going.. */
313 abandon:
314 		asoc->fragmented_delivery_inprogress = 0;
315 		chk = TAILQ_FIRST(&asoc->reasmqueue);
316 		while (chk) {
317 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
318 			asoc->size_on_reasm_queue -= chk->send_size;
319 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
320 			/*
321 			 * Lose the data pointer, since it's in the socket
322 			 * buffer
323 			 */
324 			if (chk->data) {
325 				sctp_m_freem(chk->data);
326 				chk->data = NULL;
327 			}
328 			/* Now free the address and data */
329 			sctp_free_a_chunk(stcb, chk);
330 			/* sa_ignore FREED_MEMORY */
331 			chk = TAILQ_FIRST(&asoc->reasmqueue);
332 		}
333 		return;
334 	}
335 	SCTP_TCB_LOCK_ASSERT(stcb);
336 	do {
337 		chk = TAILQ_FIRST(&asoc->reasmqueue);
338 		if (chk == NULL) {
339 			return;
340 		}
341 		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
342 			/* Can't deliver more :< */
343 			return;
344 		}
345 		stream_no = chk->rec.data.stream_number;
346 		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
347 		if (nxt_todel != chk->rec.data.stream_seq &&
348 		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
349 			/*
350 			 * Not the next sequence to deliver in its stream and
351 			 * not unordered, so we cannot deliver it yet
352 			 */
353 			return;
354 		}
355 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
356 
357 			control = sctp_build_readq_entry_chk(stcb, chk);
358 			if (control == NULL) {
359 				/* out of memory? */
360 				return;
361 			}
362 			/* save it off for our future deliveries */
363 			stcb->asoc.control_pdapi = control;
364 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
365 				end = 1;
366 			else
367 				end = 0;
368 			sctp_add_to_readq(stcb->sctp_ep,
369 			    stcb, control, &stcb->sctp_socket->so_rcv, end, SCTP_SO_NOT_LOCKED);
370 			cntDel++;
371 		} else {
372 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
373 				end = 1;
374 			else
375 				end = 0;
376 			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
377 			    stcb->asoc.control_pdapi,
378 			    chk->data, end, chk->rec.data.TSN_seq,
379 			    &stcb->sctp_socket->so_rcv)) {
380 				/*
381 				 * something is very wrong, either
382 				 * control_pdapi is NULL, or the tail_mbuf
383 				 * is corrupt, or there is an EOM already on
384 				 * the mbuf chain.
385 				 */
386 				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
387 					goto abandon;
388 				} else {
389 					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
390 						panic("This should not happen control_pdapi NULL?");
391 					}
392 					/* if we did not panic, it was an EOM */
393 					panic("Bad chunking ??");
394 					return;
395 				}
396 			}
397 			cntDel++;
398 		}
399 		/* pull it off the queue; we delivered it */
400 		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
401 		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
402 			asoc->fragmented_delivery_inprogress = 0;
403 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
404 				asoc->strmin[stream_no].last_sequence_delivered++;
405 			}
406 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
407 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
408 			}
409 		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
410 			/*
411 			 * turn the flag back on since we just delivered
412 			 * yet another one.
413 			 */
414 			asoc->fragmented_delivery_inprogress = 1;
415 		}
416 		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
417 		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
418 		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
419 		asoc->last_strm_no_delivered = chk->rec.data.stream_number;
420 
421 		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
422 		asoc->size_on_reasm_queue -= chk->send_size;
423 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
424 		/* free up the chk */
425 		chk->data = NULL;
426 		sctp_free_a_chunk(stcb, chk);
427 
428 		if (asoc->fragmented_delivery_inprogress == 0) {
429 			/*
430 			 * Now let's see if we can deliver the next one on
431 			 * the stream
432 			 */
433 			struct sctp_stream_in *strm;
434 
435 			strm = &asoc->strmin[stream_no];
436 			nxt_todel = strm->last_sequence_delivered + 1;
437 			ctl = TAILQ_FIRST(&strm->inqueue);
438 			if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
439 				while (ctl != NULL) {
440 					/* Deliver more if we can. */
441 					if (nxt_todel == ctl->sinfo_ssn) {
442 						ctlat = TAILQ_NEXT(ctl, next);
443 						TAILQ_REMOVE(&strm->inqueue, ctl, next);
444 						asoc->size_on_all_streams -= ctl->length;
445 						sctp_ucount_decr(asoc->cnt_on_all_streams);
446 						strm->last_sequence_delivered++;
447 						sctp_add_to_readq(stcb->sctp_ep, stcb,
448 						    ctl,
449 						    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
450 						ctl = ctlat;
451 					} else {
452 						break;
453 					}
454 					nxt_todel = strm->last_sequence_delivered + 1;
455 				}
456 			}
457 			break;
458 		}
459 		/* sa_ignore FREED_MEMORY */
460 		chk = TAILQ_FIRST(&asoc->reasmqueue);
461 	} while (chk);
462 }
463 
464 /*
465  * Queue the chunk either right into the socket buffer if it is the next one
466  * to go, OR put it in the correct place in the delivery queue. If we do
467  * append to the so_buf, keep doing so until we are out of order. One big
468  * question still remains: what to do when the socket buffer is FULL?
469  */
470 static void
471 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
472     struct sctp_queued_to_read *control, int *abort_flag)
473 {
474 	/*
475 	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
476 	 * all the data in one stream this could happen quite rapidly. One
477 	 * could use the TSN to keep track of things, but this scheme breaks
478 	 * down in the other type of stream usage that could occur. Send a
479 	 * single msg to stream 0, send 4 billion messages to stream 1, now
480 	 * send a message to stream 0. You have a situation where the TSN
481 	 * has wrapped but not in the stream. Is this worth worrying about
482 	 * or should we just change our queue sort at the bottom to be by
483 	 * TSN?
484 	 *
485 	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
486 	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
487 	 * assignment this could happen... and I don't see how this would be
488 	 * a violation. So for now I am undecided and will leave the sort by
489 	 * SSN alone. Maybe a hybrid approach is the answer.
490 	 *
491 	 */
492 	struct sctp_stream_in *strm;
493 	struct sctp_queued_to_read *at;
494 	int queue_needed;
495 	uint16_t nxt_todel;
496 	struct mbuf *oper;
497 
498 	queue_needed = 1;
499 	asoc->size_on_all_streams += control->length;
500 	sctp_ucount_incr(asoc->cnt_on_all_streams);
501 	strm = &asoc->strmin[control->sinfo_stream];
502 	nxt_todel = strm->last_sequence_delivered + 1;
503 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
504 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
505 	}
506 	SCTPDBG(SCTP_DEBUG_INDATA1,
507 	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
508 	    (uint32_t) control->sinfo_stream,
509 	    (uint32_t) strm->last_sequence_delivered,
510 	    (uint32_t) nxt_todel);
511 	if (compare_with_wrap(strm->last_sequence_delivered,
512 	    control->sinfo_ssn, MAX_SEQ) ||
513 	    (strm->last_sequence_delivered == control->sinfo_ssn)) {
514 		/* The incoming sseq is behind where we last delivered? */
515 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
516 		    control->sinfo_ssn, strm->last_sequence_delivered);
517 protocol_error:
518 		/*
519 		 * throw it in the stream so it gets cleaned up in
520 		 * association destruction
521 		 */
522 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
523 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
524 		    0, M_DONTWAIT, 1, MT_DATA);
525 		if (oper) {
526 			struct sctp_paramhdr *ph;
527 			uint32_t *ippp;
528 
529 			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
530 			    (sizeof(uint32_t) * 3);
531 			ph = mtod(oper, struct sctp_paramhdr *);
532 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
533 			ph->param_length = htons(SCTP_BUF_LEN(oper));
534 			ippp = (uint32_t *) (ph + 1);
535 			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
536 			ippp++;
537 			*ippp = control->sinfo_tsn;
538 			ippp++;
539 			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
540 		}
541 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
542 		sctp_abort_an_association(stcb->sctp_ep, stcb,
543 		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
544 
545 		*abort_flag = 1;
546 		return;
547 
548 	}
549 	if (nxt_todel == control->sinfo_ssn) {
550 		/* can be delivered right away? */
551 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
552 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
553 		}
554 		queue_needed = 0;
555 		asoc->size_on_all_streams -= control->length;
556 		sctp_ucount_decr(asoc->cnt_on_all_streams);
557 		strm->last_sequence_delivered++;
558 		sctp_add_to_readq(stcb->sctp_ep, stcb,
559 		    control,
560 		    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
561 		control = TAILQ_FIRST(&strm->inqueue);
562 		while (control != NULL) {
563 			/* all delivered */
564 			nxt_todel = strm->last_sequence_delivered + 1;
565 			if (nxt_todel == control->sinfo_ssn) {
566 				at = TAILQ_NEXT(control, next);
567 				TAILQ_REMOVE(&strm->inqueue, control, next);
568 				asoc->size_on_all_streams -= control->length;
569 				sctp_ucount_decr(asoc->cnt_on_all_streams);
570 				strm->last_sequence_delivered++;
571 				/*
572 				 * We ignore the return of deliver_data here
573 				 * since we always can hold the chunk on the
574 				 * d-queue. And we have a finite number that
575 				 * can be delivered from the strq.
576 				 */
577 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
578 					sctp_log_strm_del(control, NULL,
579 					    SCTP_STR_LOG_FROM_IMMED_DEL);
580 				}
581 				sctp_add_to_readq(stcb->sctp_ep, stcb,
582 				    control,
583 				    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
584 				control = at;
585 				continue;
586 			}
587 			break;
588 		}
589 	}
590 	if (queue_needed) {
591 		/*
592 		 * Ok, we did not deliver this guy, find the correct place
593 		 * to put it on the queue.
594 		 */
595 		if ((compare_with_wrap(asoc->cumulative_tsn,
596 		    control->sinfo_tsn, MAX_TSN)) ||
597 		    (control->sinfo_tsn == asoc->cumulative_tsn)) {
598 			goto protocol_error;
599 		}
600 		if (TAILQ_EMPTY(&strm->inqueue)) {
601 			/* Empty queue */
602 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
603 				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
604 			}
605 			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
606 		} else {
607 			TAILQ_FOREACH(at, &strm->inqueue, next) {
608 				if (compare_with_wrap(at->sinfo_ssn,
609 				    control->sinfo_ssn, MAX_SEQ)) {
610 					/*
611 					 * one in queue is bigger than the
612 					 * new one, insert before this one
613 					 */
614 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
615 						sctp_log_strm_del(control, at,
616 						    SCTP_STR_LOG_FROM_INSERT_MD);
617 					}
618 					TAILQ_INSERT_BEFORE(at, control, next);
619 					break;
620 				} else if (at->sinfo_ssn == control->sinfo_ssn) {
621 					/*
622 					 * Gak, He sent me a duplicate str
623 					 * seq number
624 					 */
625 					/*
626 					 * foo bar, I guess I will just free
627 					 * this new guy, should we abort
628 					 * too? FIX ME MAYBE? Or it COULD be
629 					 * that the SSN's have wrapped.
630 					 * Maybe I should compare to TSN
631 					 * somehow... sigh for now just blow
632 					 * away the chunk!
633 					 */
634 
635 					if (control->data)
636 						sctp_m_freem(control->data);
637 					control->data = NULL;
638 					asoc->size_on_all_streams -= control->length;
639 					sctp_ucount_decr(asoc->cnt_on_all_streams);
640 					if (control->whoFrom)
641 						sctp_free_remote_addr(control->whoFrom);
642 					control->whoFrom = NULL;
643 					sctp_free_a_readq(stcb, control);
644 					return;
645 				} else {
646 					if (TAILQ_NEXT(at, next) == NULL) {
647 						/*
648 						 * We are at the end, insert
649 						 * it after this one
650 						 */
651 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
652 							sctp_log_strm_del(control, at,
653 							    SCTP_STR_LOG_FROM_INSERT_TL);
654 						}
655 						TAILQ_INSERT_AFTER(&strm->inqueue,
656 						    at, control, next);
657 						break;
658 					}
659 				}
660 			}
661 		}
662 	}
663 }
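
/*
 * Illustrative sketch (added commentary, not part of the original
 * revision): the compare_with_wrap() calls above implement circular
 * ("serial number") comparison. A minimal standalone equivalent of
 * "a is strictly newer than b" for the 16-bit SSN space, assuming the
 * two values are never more than half the space apart, is shown below,
 * guarded out since it is only an illustration.
 */
#if 0
static int
example_ssn_newer(uint16_t a, uint16_t b)
{
	/* True when a != b and a is less than half the space ahead of b. */
	return ((a != b) && ((uint16_t)(a - b) < 0x8000));
}
#endif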
664 
665 /*
666  * Returns two things: the total size of the deliverable parts of the
667  * first fragmented message on the reassembly queue, and a 1 if all of
668  * the message is ready or a 0 if the message is still incomplete.
669  */
670 static int
671 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
672 {
673 	struct sctp_tmit_chunk *chk;
674 	uint32_t tsn;
675 
676 	*t_size = 0;
677 	chk = TAILQ_FIRST(&asoc->reasmqueue);
678 	if (chk == NULL) {
679 		/* nothing on the queue */
680 		return (0);
681 	}
682 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
683 		/* Not a first on the queue */
684 		return (0);
685 	}
686 	tsn = chk->rec.data.TSN_seq;
687 	while (chk) {
688 		if (tsn != chk->rec.data.TSN_seq) {
689 			return (0);
690 		}
691 		*t_size += chk->send_size;
692 		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
693 			return (1);
694 		}
695 		tsn++;
696 		chk = TAILQ_NEXT(chk, sctp_next);
697 	}
698 	return (0);
699 }
700 
701 static void
702 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
703 {
704 	struct sctp_tmit_chunk *chk;
705 	uint16_t nxt_todel;
706 	uint32_t tsize;
707 
708 doit_again:
709 	chk = TAILQ_FIRST(&asoc->reasmqueue);
710 	if (chk == NULL) {
711 		/* Huh? */
712 		asoc->size_on_reasm_queue = 0;
713 		asoc->cnt_on_reasm_queue = 0;
714 		return;
715 	}
716 	if (asoc->fragmented_delivery_inprogress == 0) {
717 		nxt_todel =
718 		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
719 		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
720 		    (nxt_todel == chk->rec.data.stream_seq ||
721 		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
722 			/*
723 			 * Yep, the first one is here and it's ok to deliver
724 			 * but should we?
725 			 */
726 			if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
727 			    (tsize >= stcb->sctp_ep->partial_delivery_point))) {
728 
729 				/*
730 				 * Yes, we set up to start reception, by
731 				 * backing down the TSN just in case we
732 				 * can't deliver.
733 				 */
734 				asoc->fragmented_delivery_inprogress = 1;
735 				asoc->tsn_last_delivered =
736 				    chk->rec.data.TSN_seq - 1;
737 				asoc->str_of_pdapi =
738 				    chk->rec.data.stream_number;
739 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
740 				asoc->pdapi_ppid = chk->rec.data.payloadtype;
741 				asoc->fragment_flags = chk->rec.data.rcv_flags;
742 				sctp_service_reassembly(stcb, asoc);
743 			}
744 		}
745 	} else {
746 		/*
747 		 * Service re-assembly will deliver stream data queued at
748 		 * the end of fragmented delivery... but it won't know to go
749 		 * back and call itself again... we do that here with the
750 		 * goto doit_again
751 		 */
752 		sctp_service_reassembly(stcb, asoc);
753 		if (asoc->fragmented_delivery_inprogress == 0) {
754 			/*
755 			 * finished our fragmented delivery; could more be
756 			 * waiting?
757 			 */
758 			goto doit_again;
759 		}
760 	}
761 }
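
/*
 * Worked example (added commentary, not part of the original revision):
 * with partial_delivery_point = 4096, an incomplete fragmented message
 * with only 3000 deliverable bytes stays on the reassembly queue, while
 * either a fully reassembled message of any size or 4096+ deliverable
 * bytes of an incomplete one starts partial (fragmented) delivery.
 */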
762 
763 /*
764  * Dump onto the re-assembly queue, in its proper place. After dumping on the
765  * queue, see if anything can be delivered. If so pull it off (or as much as
766  * we can). If we run out of space then we must dump what we can and set the
767  * appropriate flag to say we queued what we could.
768  */
769 static void
770 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
771     struct sctp_tmit_chunk *chk, int *abort_flag)
772 {
773 	struct mbuf *oper;
774 	uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
775 	u_char last_flags;
776 	struct sctp_tmit_chunk *at, *prev, *next;
777 
778 	prev = next = NULL;
779 	cum_ackp1 = asoc->tsn_last_delivered + 1;
780 	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
781 		/* This is the first one on the queue */
782 		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
783 		/*
784 		 * we do not check for delivery of anything when only one
785 		 * fragment is here
786 		 */
787 		asoc->size_on_reasm_queue = chk->send_size;
788 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
789 		if (chk->rec.data.TSN_seq == cum_ackp1) {
790 			if (asoc->fragmented_delivery_inprogress == 0 &&
791 			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
792 			    SCTP_DATA_FIRST_FRAG) {
793 				/*
794 				 * An empty queue, no delivery in progress,
795 				 * we hit the next one and it does NOT have
796 				 * a FIRST fragment mark.
797 				 */
798 				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's not first, no fragmented delivery in progress\n");
799 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
800 				    0, M_DONTWAIT, 1, MT_DATA);
801 
802 				if (oper) {
803 					struct sctp_paramhdr *ph;
804 					uint32_t *ippp;
805 
806 					SCTP_BUF_LEN(oper) =
807 					    sizeof(struct sctp_paramhdr) +
808 					    (sizeof(uint32_t) * 3);
809 					ph = mtod(oper, struct sctp_paramhdr *);
810 					ph->param_type =
811 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
812 					ph->param_length = htons(SCTP_BUF_LEN(oper));
813 					ippp = (uint32_t *) (ph + 1);
814 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
815 					ippp++;
816 					*ippp = chk->rec.data.TSN_seq;
817 					ippp++;
818 					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
819 
820 				}
821 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
822 				sctp_abort_an_association(stcb->sctp_ep, stcb,
823 				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
824 				*abort_flag = 1;
825 			} else if (asoc->fragmented_delivery_inprogress &&
826 			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
827 				/*
828 				 * We are doing a partial delivery and the
829 				 * NEXT chunk MUST be either the LAST or
830 				 * MIDDLE fragment, NOT a FIRST
831 				 */
832 				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
833 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
834 				    0, M_DONTWAIT, 1, MT_DATA);
835 				if (oper) {
836 					struct sctp_paramhdr *ph;
837 					uint32_t *ippp;
838 
839 					SCTP_BUF_LEN(oper) =
840 					    sizeof(struct sctp_paramhdr) +
841 					    (3 * sizeof(uint32_t));
842 					ph = mtod(oper, struct sctp_paramhdr *);
843 					ph->param_type =
844 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
845 					ph->param_length = htons(SCTP_BUF_LEN(oper));
846 					ippp = (uint32_t *) (ph + 1);
847 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
848 					ippp++;
849 					*ippp = chk->rec.data.TSN_seq;
850 					ippp++;
851 					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
852 				}
853 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
854 				sctp_abort_an_association(stcb->sctp_ep, stcb,
855 				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
856 				*abort_flag = 1;
857 			} else if (asoc->fragmented_delivery_inprogress) {
858 				/*
859 				 * Here we are ok with a MIDDLE or LAST
860 				 * piece
861 				 */
862 				if (chk->rec.data.stream_number !=
863 				    asoc->str_of_pdapi) {
864 					/* Got to be the right STR No */
865 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not the same stream number %d vs %d\n",
866 					    chk->rec.data.stream_number,
867 					    asoc->str_of_pdapi);
868 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
869 					    0, M_DONTWAIT, 1, MT_DATA);
870 					if (oper) {
871 						struct sctp_paramhdr *ph;
872 						uint32_t *ippp;
873 
874 						SCTP_BUF_LEN(oper) =
875 						    sizeof(struct sctp_paramhdr) +
876 						    (sizeof(uint32_t) * 3);
877 						ph = mtod(oper,
878 						    struct sctp_paramhdr *);
879 						ph->param_type =
880 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
881 						ph->param_length =
882 						    htons(SCTP_BUF_LEN(oper));
883 						ippp = (uint32_t *) (ph + 1);
884 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
885 						ippp++;
886 						*ippp = chk->rec.data.TSN_seq;
887 						ippp++;
888 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
889 					}
890 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
891 					sctp_abort_an_association(stcb->sctp_ep,
892 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
893 					*abort_flag = 1;
894 				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
895 					    SCTP_DATA_UNORDERED &&
896 					    chk->rec.data.stream_seq !=
897 				    asoc->ssn_of_pdapi) {
898 					/* Got to be the right STR Seq */
899 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not the same stream seq %d vs %d\n",
900 					    chk->rec.data.stream_seq,
901 					    asoc->ssn_of_pdapi);
902 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
903 					    0, M_DONTWAIT, 1, MT_DATA);
904 					if (oper) {
905 						struct sctp_paramhdr *ph;
906 						uint32_t *ippp;
907 
908 						SCTP_BUF_LEN(oper) =
909 						    sizeof(struct sctp_paramhdr) +
910 						    (3 * sizeof(uint32_t));
911 						ph = mtod(oper,
912 						    struct sctp_paramhdr *);
913 						ph->param_type =
914 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
915 						ph->param_length =
916 						    htons(SCTP_BUF_LEN(oper));
917 						ippp = (uint32_t *) (ph + 1);
918 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
919 						ippp++;
920 						*ippp = chk->rec.data.TSN_seq;
921 						ippp++;
922 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
923 
924 					}
925 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
926 					sctp_abort_an_association(stcb->sctp_ep,
927 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
928 					*abort_flag = 1;
929 				}
930 			}
931 		}
932 		return;
933 	}
934 	/* Find its place */
935 	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
936 		if (compare_with_wrap(at->rec.data.TSN_seq,
937 		    chk->rec.data.TSN_seq, MAX_TSN)) {
938 			/*
939 			 * one in queue is bigger than the new one, insert
940 			 * before this one
941 			 */
942 			/* A check */
943 			asoc->size_on_reasm_queue += chk->send_size;
944 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
945 			next = at;
946 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
947 			break;
948 		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
949 			/* Gak, He sent me a duplicate str seq number */
950 			/*
951 			 * foo bar, I guess I will just free this new guy,
952 			 * should we abort too? FIX ME MAYBE? Or it COULD be
953 			 * that the SSN's have wrapped. Maybe I should
954 			 * compare to TSN somehow... sigh for now just blow
955 			 * away the chunk!
956 			 */
957 			if (chk->data) {
958 				sctp_m_freem(chk->data);
959 				chk->data = NULL;
960 			}
961 			sctp_free_a_chunk(stcb, chk);
962 			return;
963 		} else {
964 			last_flags = at->rec.data.rcv_flags;
965 			last_tsn = at->rec.data.TSN_seq;
966 			prev = at;
967 			if (TAILQ_NEXT(at, sctp_next) == NULL) {
968 				/*
969 				 * We are at the end, insert it after this
970 				 * one
971 				 */
972 				/* check it first */
973 				asoc->size_on_reasm_queue += chk->send_size;
974 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
975 				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
976 				break;
977 			}
978 		}
979 	}
980 	/* Now the audits */
981 	if (prev) {
982 		prev_tsn = chk->rec.data.TSN_seq - 1;
983 		if (prev_tsn == prev->rec.data.TSN_seq) {
984 			/*
985 			 * Ok the one I am dropping onto the end is the
986 			 * NEXT. A bit of validation here.
987 			 */
988 			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
989 			    SCTP_DATA_FIRST_FRAG ||
990 			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
991 			    SCTP_DATA_MIDDLE_FRAG) {
992 				/*
993 				 * Insert chk MUST be a MIDDLE or LAST
994 				 * fragment
995 				 */
996 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
997 				    SCTP_DATA_FIRST_FRAG) {
998 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
999 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
1000 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1001 					    0, M_DONTWAIT, 1, MT_DATA);
1002 					if (oper) {
1003 						struct sctp_paramhdr *ph;
1004 						uint32_t *ippp;
1005 
1006 						SCTP_BUF_LEN(oper) =
1007 						    sizeof(struct sctp_paramhdr) +
1008 						    (3 * sizeof(uint32_t));
1009 						ph = mtod(oper,
1010 						    struct sctp_paramhdr *);
1011 						ph->param_type =
1012 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1013 						ph->param_length =
1014 						    htons(SCTP_BUF_LEN(oper));
1015 						ippp = (uint32_t *) (ph + 1);
1016 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1017 						ippp++;
1018 						*ippp = chk->rec.data.TSN_seq;
1019 						ippp++;
1020 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1021 
1022 					}
1023 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1024 					sctp_abort_an_association(stcb->sctp_ep,
1025 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1026 					*abort_flag = 1;
1027 					return;
1028 				}
1029 				if (chk->rec.data.stream_number !=
1030 				    prev->rec.data.stream_number) {
1031 					/*
1032 					 * Huh, need the correct STR here,
1033 					 * they must be the same.
1034 					 */
1035 					SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1036 					    chk->rec.data.stream_number,
1037 					    prev->rec.data.stream_number);
1038 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1039 					    0, M_DONTWAIT, 1, MT_DATA);
1040 					if (oper) {
1041 						struct sctp_paramhdr *ph;
1042 						uint32_t *ippp;
1043 
1044 						SCTP_BUF_LEN(oper) =
1045 						    sizeof(struct sctp_paramhdr) +
1046 						    (3 * sizeof(uint32_t));
1047 						ph = mtod(oper,
1048 						    struct sctp_paramhdr *);
1049 						ph->param_type =
1050 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1051 						ph->param_length =
1052 						    htons(SCTP_BUF_LEN(oper));
1053 						ippp = (uint32_t *) (ph + 1);
1054 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1055 						ippp++;
1056 						*ippp = chk->rec.data.TSN_seq;
1057 						ippp++;
1058 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1059 					}
1060 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1061 					sctp_abort_an_association(stcb->sctp_ep,
1062 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1063 
1064 					*abort_flag = 1;
1065 					return;
1066 				}
1067 				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1068 				    chk->rec.data.stream_seq !=
1069 				    prev->rec.data.stream_seq) {
1070 					/*
1071 					 * Huh, need the correct STR here,
1072 					 * they must be the same.
1073 					 */
1074 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1075 					    chk->rec.data.stream_seq,
1076 					    prev->rec.data.stream_seq);
1077 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1078 					    0, M_DONTWAIT, 1, MT_DATA);
1079 					if (oper) {
1080 						struct sctp_paramhdr *ph;
1081 						uint32_t *ippp;
1082 
1083 						SCTP_BUF_LEN(oper) =
1084 						    sizeof(struct sctp_paramhdr) +
1085 						    (3 * sizeof(uint32_t));
1086 						ph = mtod(oper,
1087 						    struct sctp_paramhdr *);
1088 						ph->param_type =
1089 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1090 						ph->param_length =
1091 						    htons(SCTP_BUF_LEN(oper));
1092 						ippp = (uint32_t *) (ph + 1);
1093 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1094 						ippp++;
1095 						*ippp = chk->rec.data.TSN_seq;
1096 						ippp++;
1097 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1098 					}
1099 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1100 					sctp_abort_an_association(stcb->sctp_ep,
1101 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1102 
1103 					*abort_flag = 1;
1104 					return;
1105 				}
1106 			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1107 			    SCTP_DATA_LAST_FRAG) {
1108 				/* Insert chk MUST be a FIRST */
1109 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1110 				    SCTP_DATA_FIRST_FRAG) {
1111 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, it's not FIRST and it must be!\n");
1112 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1113 					    0, M_DONTWAIT, 1, MT_DATA);
1114 					if (oper) {
1115 						struct sctp_paramhdr *ph;
1116 						uint32_t *ippp;
1117 
1118 						SCTP_BUF_LEN(oper) =
1119 						    sizeof(struct sctp_paramhdr) +
1120 						    (3 * sizeof(uint32_t));
1121 						ph = mtod(oper,
1122 						    struct sctp_paramhdr *);
1123 						ph->param_type =
1124 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1125 						ph->param_length =
1126 						    htons(SCTP_BUF_LEN(oper));
1127 						ippp = (uint32_t *) (ph + 1);
1128 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1129 						ippp++;
1130 						*ippp = chk->rec.data.TSN_seq;
1131 						ippp++;
1132 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1133 
1134 					}
1135 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1136 					sctp_abort_an_association(stcb->sctp_ep,
1137 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1138 
1139 					*abort_flag = 1;
1140 					return;
1141 				}
1142 			}
1143 		}
1144 	}
1145 	if (next) {
1146 		post_tsn = chk->rec.data.TSN_seq + 1;
1147 		if (post_tsn == next->rec.data.TSN_seq) {
1148 			/*
1149 			 * Ok the one I am inserting ahead of is my NEXT
1150 			 * one. A bit of validation here.
1151 			 */
1152 			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1153 				/* Insert chk MUST be a last fragment */
1154 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1155 				    != SCTP_DATA_LAST_FRAG) {
1156 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1157 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's not a last!\n");
1158 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1159 					    0, M_DONTWAIT, 1, MT_DATA);
1160 					if (oper) {
1161 						struct sctp_paramhdr *ph;
1162 						uint32_t *ippp;
1163 
1164 						SCTP_BUF_LEN(oper) =
1165 						    sizeof(struct sctp_paramhdr) +
1166 						    (3 * sizeof(uint32_t));
1167 						ph = mtod(oper,
1168 						    struct sctp_paramhdr *);
1169 						ph->param_type =
1170 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1171 						ph->param_length =
1172 						    htons(SCTP_BUF_LEN(oper));
1173 						ippp = (uint32_t *) (ph + 1);
1174 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1175 						ippp++;
1176 						*ippp = chk->rec.data.TSN_seq;
1177 						ippp++;
1178 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1179 					}
1180 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1181 					sctp_abort_an_association(stcb->sctp_ep,
1182 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1183 
1184 					*abort_flag = 1;
1185 					return;
1186 				}
1187 			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1188 				    SCTP_DATA_MIDDLE_FRAG ||
1189 				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1190 			    SCTP_DATA_LAST_FRAG) {
1191 				/*
1192 				 * Insert chk CAN be a MIDDLE or FIRST, NOT
1193 				 * a LAST
1194 				 */
1195 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1196 				    SCTP_DATA_LAST_FRAG) {
1197 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1198 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1199 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1200 					    0, M_DONTWAIT, 1, MT_DATA);
1201 					if (oper) {
1202 						struct sctp_paramhdr *ph;
1203 						uint32_t *ippp;
1204 
1205 						SCTP_BUF_LEN(oper) =
1206 						    sizeof(struct sctp_paramhdr) +
1207 						    (3 * sizeof(uint32_t));
1208 						ph = mtod(oper,
1209 						    struct sctp_paramhdr *);
1210 						ph->param_type =
1211 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1212 						ph->param_length =
1213 						    htons(SCTP_BUF_LEN(oper));
1214 						ippp = (uint32_t *) (ph + 1);
1215 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1216 						ippp++;
1217 						*ippp = chk->rec.data.TSN_seq;
1218 						ippp++;
1219 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1220 
1221 					}
1222 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1223 					sctp_abort_an_association(stcb->sctp_ep,
1224 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1225 
1226 					*abort_flag = 1;
1227 					return;
1228 				}
1229 				if (chk->rec.data.stream_number !=
1230 				    next->rec.data.stream_number) {
1231 					/*
1232 					 * Huh, need the correct STR here,
1233 					 * they must be the same.
1234 					 */
1235 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1236 					    chk->rec.data.stream_number,
1237 					    next->rec.data.stream_number);
1238 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1239 					    0, M_DONTWAIT, 1, MT_DATA);
1240 					if (oper) {
1241 						struct sctp_paramhdr *ph;
1242 						uint32_t *ippp;
1243 
1244 						SCTP_BUF_LEN(oper) =
1245 						    sizeof(struct sctp_paramhdr) +
1246 						    (3 * sizeof(uint32_t));
1247 						ph = mtod(oper,
1248 						    struct sctp_paramhdr *);
1249 						ph->param_type =
1250 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1251 						ph->param_length =
1252 						    htons(SCTP_BUF_LEN(oper));
1253 						ippp = (uint32_t *) (ph + 1);
1254 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1255 						ippp++;
1256 						*ippp = chk->rec.data.TSN_seq;
1257 						ippp++;
1258 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1259 
1260 					}
1261 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1262 					sctp_abort_an_association(stcb->sctp_ep,
1263 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1264 
1265 					*abort_flag = 1;
1266 					return;
1267 				}
1268 				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1269 				    chk->rec.data.stream_seq !=
1270 				    next->rec.data.stream_seq) {
1271 					/*
1272 					 * Huh, need the correct STR here,
1273 					 * they must be the same.
1274 					 */
1275 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1276 					    chk->rec.data.stream_seq,
1277 					    next->rec.data.stream_seq);
1278 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1279 					    0, M_DONTWAIT, 1, MT_DATA);
1280 					if (oper) {
1281 						struct sctp_paramhdr *ph;
1282 						uint32_t *ippp;
1283 
1284 						SCTP_BUF_LEN(oper) =
1285 						    sizeof(struct sctp_paramhdr) +
1286 						    (3 * sizeof(uint32_t));
1287 						ph = mtod(oper,
1288 						    struct sctp_paramhdr *);
1289 						ph->param_type =
1290 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1291 						ph->param_length =
1292 						    htons(SCTP_BUF_LEN(oper));
1293 						ippp = (uint32_t *) (ph + 1);
1294 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1295 						ippp++;
1296 						*ippp = chk->rec.data.TSN_seq;
1297 						ippp++;
1298 						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1299 					}
1300 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1301 					sctp_abort_an_association(stcb->sctp_ep,
1302 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1303 
1304 					*abort_flag = 1;
1305 					return;
1306 				}
1307 			}
1308 		}
1309 	}
1310 	/* Do we need to do some delivery? check */
1311 	sctp_deliver_reasm_check(stcb, asoc);
1312 }
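
/*
 * Summary (added commentary, not part of the original revision) of the
 * neighbor audits above: a chunk whose TSN immediately follows a FIRST
 * or MIDDLE fragment must itself be a MIDDLE or LAST of the same
 * stream (and, when ordered, the same stream seq); one that follows a
 * LAST must be a FIRST. Symmetrically, a chunk immediately preceding a
 * FIRST must be a LAST, and one preceding a MIDDLE or LAST must not be
 * a LAST and must match its stream/ssn.
 */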
1313 
1314 /*
1315  * This is an unfortunate routine. It checks to make sure an evil guy is not
1316  * stuffing us full of bad packet fragments. A broken peer could also do this
1317  * but this is doubtful. It is too bad I must worry about evil crackers,
1318  * sigh :< more cycles.
1319  */
1320 static int
1321 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1322     uint32_t TSN_seq)
1323 {
1324 	struct sctp_tmit_chunk *at;
1325 	uint32_t tsn_est;
1326 
1327 	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1328 		if (compare_with_wrap(TSN_seq,
1329 		    at->rec.data.TSN_seq, MAX_TSN)) {
1330 			/* is it one bigger? */
1331 			tsn_est = at->rec.data.TSN_seq + 1;
1332 			if (tsn_est == TSN_seq) {
1333 				/* yep. It better be a last then */
1334 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1335 				    SCTP_DATA_LAST_FRAG) {
1336 					/*
1337 					 * Ok this guy belongs next to a guy
1338 					 * that is NOT last, it should be a
1339 					 * middle/last, not a complete
1340 					 * chunk.
1341 					 */
1342 					return (1);
1343 				} else {
1344 					/*
1345 					 * This guy is ok since its a LAST
1346 					 * This guy is ok since it's a LAST
1347 					 * and the new chunk is a fully
1348 					 * self-contained one.
1349 					return (0);
1350 				}
1351 			}
1352 		} else if (TSN_seq == at->rec.data.TSN_seq) {
1353 			/* Software error since I have a dup? */
1354 			return (1);
1355 		} else {
1356 			/*
1357 			 * Ok, 'at' is larger than new chunk but does it
1358 			 * need to be right before it?
1359 			 */
1360 			tsn_est = TSN_seq + 1;
1361 			if (tsn_est == at->rec.data.TSN_seq) {
1362 				/* Yep, It better be a first */
1363 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1364 				    SCTP_DATA_FIRST_FRAG) {
1365 					return (1);
1366 				} else {
1367 					return (0);
1368 				}
1369 			}
1370 		}
1371 	}
1372 	return (0);
1373 }
1374 
1375 
1376 static int
1377 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1378     struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1379     struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1380     int *break_flag, int last_chunk)
1381 {
1382 	/* Process a data chunk */
1384 	struct sctp_tmit_chunk *chk;
1385 	uint32_t tsn, gap;
1386 	struct mbuf *dmbuf;
1387 	int indx, the_len;
1388 	int need_reasm_check = 0;
1389 	uint16_t strmno, strmseq;
1390 	struct mbuf *oper;
1391 	struct sctp_queued_to_read *control;
1392 	int ordered;
1393 	uint32_t protocol_id;
1394 	uint8_t chunk_flags;
1395 	struct sctp_stream_reset_list *liste;
1396 
1397 	chk = NULL;
1398 	tsn = ntohl(ch->dp.tsn);
1399 	chunk_flags = ch->ch.chunk_flags;
1400 	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1401 		asoc->send_sack = 1;
1402 	}
1403 	protocol_id = ch->dp.protocol_id;
1404 	ordered = ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0);
1405 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1406 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1407 	}
1408 	if (stcb == NULL) {
1409 		return (0);
1410 	}
1411 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1412 	if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
1413 	    asoc->cumulative_tsn == tsn) {
1414 		/* It is a duplicate */
1415 		SCTP_STAT_INCR(sctps_recvdupdata);
1416 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1417 			/* Record a dup for the next outbound sack */
1418 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1419 			asoc->numduptsns++;
1420 		}
1421 		asoc->send_sack = 1;
1422 		return (0);
1423 	}
1424 	/* Calculate the number of TSN's between the base and this TSN */
1425 	if (tsn >= asoc->mapping_array_base_tsn) {
1426 		gap = tsn - asoc->mapping_array_base_tsn;
1427 	} else {
1428 		gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1;
1429 	}
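	/*
	 * Worked example (added commentary, not part of the original
	 * revision): with mapping_array_base_tsn = 0xfffffffe and tsn = 1,
	 * the else branch gives gap = (0xffffffff - 0xfffffffe) + 1 + 1 = 3:
	 * the base TSN occupies bit 0, 0xffffffff bit 1, 0 bit 2 and 1
	 * bit 3, exactly as if no wrap had occurred.
	 */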
1430 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1431 		/* Can't hold the bit in the mapping array at its max size, toss it */
1432 		return (0);
1433 	}
1434 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1435 		SCTP_TCB_LOCK_ASSERT(stcb);
1436 		if (sctp_expand_mapping_array(asoc, gap)) {
1437 			/* Can't expand, drop it */
1438 			return (0);
1439 		}
1440 	}
1441 	if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
1442 		*high_tsn = tsn;
1443 	}
1444 	/* See if we have received this one already */
1445 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
1446 		SCTP_STAT_INCR(sctps_recvdupdata);
1447 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1448 			/* Record a dup for the next outbound sack */
1449 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1450 			asoc->numduptsns++;
1451 		}
1452 		asoc->send_sack = 1;
1453 		return (0);
1454 	}
1455 	/*
1456 	 * Check to see about the GONE flag, duplicates would cause a sack
1457 	 * to be sent up above
1458 	 */
1459 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1460 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1461 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1462 	    ) {
1463 		/*
1464 		 * wait a minute, this guy is gone, there is no longer a
1465 		 * receiver. Send peer an ABORT!
1466 		 */
1467 		struct mbuf *op_err;
1468 
1469 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1470 		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
1471 		*abort_flag = 1;
1472 		return (0);
1473 	}
1474 	/*
1475 	 * Now before going further we see if there is room. If NOT then we
1476 	 * MAY let one through only IF this TSN is the one we are waiting
1477 	 * for on a partial delivery API.
1478 	 */
1479 
1480 	/* now do the tests */
1481 	if (((asoc->cnt_on_all_streams +
1482 	    asoc->cnt_on_reasm_queue +
1483 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1484 	    (((int)asoc->my_rwnd) <= 0)) {
1485 		/*
1486 		 * When we have NO room in the rwnd we check to make sure
1487 		 * the reader is doing its job...
1488 		 */
1489 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1490 			/* some to read, wake-up */
1491 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1492 			struct socket *so;
1493 
1494 			so = SCTP_INP_SO(stcb->sctp_ep);
1495 			atomic_add_int(&stcb->asoc.refcnt, 1);
1496 			SCTP_TCB_UNLOCK(stcb);
1497 			SCTP_SOCKET_LOCK(so, 1);
1498 			SCTP_TCB_LOCK(stcb);
1499 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1500 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1501 				/* assoc was freed while we were unlocked */
1502 				SCTP_SOCKET_UNLOCK(so, 1);
1503 				return (0);
1504 			}
1505 #endif
1506 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1507 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1508 			SCTP_SOCKET_UNLOCK(so, 1);
1509 #endif
1510 		}
1511 		/* now is it in the mapping array of what we have accepted? */
1512 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1513 			/* Nope, not in the valid range; dump it */
1514 			sctp_set_rwnd(stcb, asoc);
1515 			if ((asoc->cnt_on_all_streams +
1516 			    asoc->cnt_on_reasm_queue +
1517 			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1518 				SCTP_STAT_INCR(sctps_datadropchklmt);
1519 			} else {
1520 				SCTP_STAT_INCR(sctps_datadroprwnd);
1521 			}
1522 			indx = *break_flag;
1523 			*break_flag = 1;
1524 			return (0);
1525 		}
1526 	}
1527 	strmno = ntohs(ch->dp.stream_id);
1528 	if (strmno >= asoc->streamincnt) {
1529 		struct sctp_paramhdr *phdr;
1530 		struct mbuf *mb;
1531 
1532 		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1533 		    0, M_DONTWAIT, 1, MT_DATA);
1534 		if (mb != NULL) {
1535 			/* add some space up front so prepend will work well */
1536 			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1537 			phdr = mtod(mb, struct sctp_paramhdr *);
1538 			/*
1539 			 * Error causes are just param's and this one has
1540 			 * two back-to-back phdrs, one with the error type
1541 			 * and size, the other with the stream id and a rsvd
1542 			 */
1543 			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1544 			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1545 			phdr->param_length =
1546 			    htons(sizeof(struct sctp_paramhdr) * 2);
1547 			phdr++;
1548 			/* We insert the stream in the type field */
1549 			phdr->param_type = ch->dp.stream_id;
1550 			/* And set the length to 0 for the rsvd field */
1551 			phdr->param_length = 0;
1552 			sctp_queue_op_err(stcb, mb);
1553 		}
1554 		SCTP_STAT_INCR(sctps_badsid);
1555 		SCTP_TCB_LOCK_ASSERT(stcb);
1556 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1557 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1558 			/* we have a new high score */
1559 			asoc->highest_tsn_inside_map = tsn;
1560 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1561 				sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
1562 			}
1563 		}
1564 		if (tsn == (asoc->cumulative_tsn + 1)) {
1565 			/* Update cum-ack */
1566 			asoc->cumulative_tsn = tsn;
1567 		}
1568 		return (0);
1569 	}
1570 	/*
1571 	 * Before we continue, let's validate that we are not being fooled
1572 	 * by an evil attacker. We can only have 4k chunks outstanding, based
1573 	 * on the TSN spread allowed by the mapping array (512 bytes * 8
1574 	 * bits), so there is no way our stream sequence numbers could have
1575 	 * wrapped. We of course only validate the FIRST fragment, so the bit must be set.
1576 	 */
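	/*
	 * Worked arithmetic (a sketch): a 512-byte mapping array tracks
	 * 512 * 8 = 4096 TSNs at once, far fewer than the 65536 values a
	 * 16-bit stream sequence number can take, so a legitimate peer
	 * cannot wrap the SSN inside the window.
	 */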
1577 	strmseq = ntohs(ch->dp.stream_sequence);
1578 #ifdef SCTP_ASOCLOG_OF_TSNS
1579 	SCTP_TCB_LOCK_ASSERT(stcb);
1580 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1581 		asoc->tsn_in_at = 0;
1582 		asoc->tsn_in_wrapped = 1;
1583 	}
1584 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1585 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1586 	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1587 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1588 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1589 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1590 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1591 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1592 	asoc->tsn_in_at++;
1593 #endif
1594 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1595 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1596 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1597 	    (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
1598 	    strmseq, MAX_SEQ) ||
1599 	    asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
1600 		/* The incoming sseq is behind where we last delivered? */
1601 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1602 		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1603 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1604 		    0, M_DONTWAIT, 1, MT_DATA);
1605 		if (oper) {
1606 			struct sctp_paramhdr *ph;
1607 			uint32_t *ippp;
1608 
1609 			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1610 			    (3 * sizeof(uint32_t));
1611 			ph = mtod(oper, struct sctp_paramhdr *);
1612 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1613 			ph->param_length = htons(SCTP_BUF_LEN(oper));
1614 			ippp = (uint32_t *) (ph + 1);
1615 			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1616 			ippp++;
1617 			*ippp = tsn;
1618 			ippp++;
1619 			*ippp = ((strmno << 16) | strmseq);
1620 
1621 		}
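		/*
		 * The three trailing 32-bit words above are diagnostic only:
		 * an internal location code, the offending TSN, and the
		 * stream number packed with the stream sequence number.
		 */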
1622 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1623 		sctp_abort_an_association(stcb->sctp_ep, stcb,
1624 		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1625 		*abort_flag = 1;
1626 		return (0);
1627 	}
1628 	/************************************
1629 	 * From here down we may find ch-> invalid
1630 	 * so it's a good idea NOT to use it.
1631 	 *************************************/
1632 
1633 	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1634 	if (last_chunk == 0) {
1635 		dmbuf = SCTP_M_COPYM(*m,
1636 		    (offset + sizeof(struct sctp_data_chunk)),
1637 		    the_len, M_DONTWAIT);
1638 #ifdef SCTP_MBUF_LOGGING
1639 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1640 			struct mbuf *mat;
1641 
1642 			mat = dmbuf;
1643 			while (mat) {
1644 				if (SCTP_BUF_IS_EXTENDED(mat)) {
1645 					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1646 				}
1647 				mat = SCTP_BUF_NEXT(mat);
1648 			}
1649 		}
1650 #endif
1651 	} else {
1652 		/* We can steal the last chunk */
1653 		int l_len;
1654 
1655 		dmbuf = *m;
1656 		/* lop off the top part */
1657 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1658 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1659 			l_len = SCTP_BUF_LEN(dmbuf);
1660 		} else {
1661 			/*
1662 			 * need to count up the size; hopefully we do not
1663 			 * hit this too often :-0
1664 			 */
1665 			struct mbuf *lat;
1666 
1667 			l_len = 0;
1668 			lat = dmbuf;
1669 			while (lat) {
1670 				l_len += SCTP_BUF_LEN(lat);
1671 				lat = SCTP_BUF_NEXT(lat);
1672 			}
1673 		}
1674 		if (l_len > the_len) {
1675 			/* Trim the extra bytes off the end too */
1676 			m_adj(dmbuf, -(l_len - the_len));
1677 		}
1678 	}
1679 	if (dmbuf == NULL) {
1680 		SCTP_STAT_INCR(sctps_nomem);
1681 		return (0);
1682 	}
1683 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1684 	    asoc->fragmented_delivery_inprogress == 0 &&
1685 	    TAILQ_EMPTY(&asoc->resetHead) &&
1686 	    ((ordered == 0) ||
1687 	    ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1688 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1689 		/* Candidate for express delivery */
1690 		/*
1691 		 * It's not fragmented, no PD-API is up, nothing is in the
1692 		 * delivery queue, it's un-ordered OR ordered and the next to
1693 		 * deliver AND nothing else is stuck on the stream queue,
1694 		 * and there is room for it in the socket buffer. Let's just
1695 		 * stuff it up the buffer....
1696 		 */
1697 
1698 		/* It would be nice to avoid this copy if we could :< */
1699 		sctp_alloc_a_readq(stcb, control);
1700 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1701 		    protocol_id,
1702 		    stcb->asoc.context,
1703 		    strmno, strmseq,
1704 		    chunk_flags,
1705 		    dmbuf);
1706 		if (control == NULL) {
1707 			goto failed_express_del;
1708 		}
1709 		sctp_add_to_readq(stcb->sctp_ep, stcb, control, &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
1710 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1711 			/* for ordered, bump what we delivered */
1712 			asoc->strmin[strmno].last_sequence_delivered++;
1713 		}
1714 		SCTP_STAT_INCR(sctps_recvexpress);
1715 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1716 			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1717 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1718 		}
1719 		control = NULL;
1720 		goto finish_express_del;
1721 	}
1722 failed_express_del:
1723 	/* If we reach here this is a new chunk */
1724 	chk = NULL;
1725 	control = NULL;
1726 	/* Express for fragmented delivery? */
1727 	if ((asoc->fragmented_delivery_inprogress) &&
1728 	    (stcb->asoc.control_pdapi) &&
1729 	    (asoc->str_of_pdapi == strmno) &&
1730 	    (asoc->ssn_of_pdapi == strmseq)
1731 	    ) {
1732 		control = stcb->asoc.control_pdapi;
1733 		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1734 			/* Can't be another first? */
1735 			goto failed_pdapi_express_del;
1736 		}
1737 		if (tsn == (control->sinfo_tsn + 1)) {
1738 			/* Yep, we can add it on */
1739 			int end = 0;
1740 			uint32_t cumack;
1741 
1742 			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1743 				end = 1;
1744 			}
1745 			cumack = asoc->cumulative_tsn;
1746 			if ((cumack + 1) == tsn)
1747 				cumack = tsn;
1748 
1749 			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1750 			    tsn,
1751 			    &stcb->sctp_socket->so_rcv)) {
1752 				SCTP_PRINTF("Append fails end:%d\n", end);
1753 				goto failed_pdapi_express_del;
1754 			}
1755 			SCTP_STAT_INCR(sctps_recvexpressm);
1756 			control->sinfo_tsn = tsn;
1757 			asoc->tsn_last_delivered = tsn;
1758 			asoc->fragment_flags = chunk_flags;
1759 			asoc->tsn_of_pdapi_last_delivered = tsn;
1760 			asoc->last_flags_delivered = chunk_flags;
1761 			asoc->last_strm_seq_delivered = strmseq;
1762 			asoc->last_strm_no_delivered = strmno;
1763 			if (end) {
1764 				/* clean up the flags and such */
1765 				asoc->fragmented_delivery_inprogress = 0;
1766 				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1767 					asoc->strmin[strmno].last_sequence_delivered++;
1768 				}
1769 				stcb->asoc.control_pdapi = NULL;
1770 				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1771 					/*
1772 					 * There could be another message
1773 					 * ready
1774 					 */
1775 					need_reasm_check = 1;
1776 				}
1777 			}
1778 			control = NULL;
1779 			goto finish_express_del;
1780 		}
1781 	}
1782 failed_pdapi_express_del:
1783 	control = NULL;
1784 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1785 		sctp_alloc_a_chunk(stcb, chk);
1786 		if (chk == NULL) {
1787 			/* No memory so we drop the chunk */
1788 			SCTP_STAT_INCR(sctps_nomem);
1789 			if (last_chunk == 0) {
1790 				/* we copied it, free the copy */
1791 				sctp_m_freem(dmbuf);
1792 			}
1793 			return (0);
1794 		}
1795 		chk->rec.data.TSN_seq = tsn;
1796 		chk->no_fr_allowed = 0;
1797 		chk->rec.data.stream_seq = strmseq;
1798 		chk->rec.data.stream_number = strmno;
1799 		chk->rec.data.payloadtype = protocol_id;
1800 		chk->rec.data.context = stcb->asoc.context;
1801 		chk->rec.data.doing_fast_retransmit = 0;
1802 		chk->rec.data.rcv_flags = chunk_flags;
1803 		chk->asoc = asoc;
1804 		chk->send_size = the_len;
1805 		chk->whoTo = net;
1806 		atomic_add_int(&net->ref_count, 1);
1807 		chk->data = dmbuf;
1808 	} else {
1809 		sctp_alloc_a_readq(stcb, control);
1810 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1811 		    protocol_id,
1812 		    stcb->asoc.context,
1813 		    strmno, strmseq,
1814 		    chunk_flags,
1815 		    dmbuf);
1816 		if (control == NULL) {
1817 			/* No memory so we drop the chunk */
1818 			SCTP_STAT_INCR(sctps_nomem);
1819 			if (last_chunk == 0) {
1820 				/* we copied it, free the copy */
1821 				sctp_m_freem(dmbuf);
1822 			}
1823 			return (0);
1824 		}
1825 		control->length = the_len;
1826 	}
1827 
1828 	/* Mark it as received */
1829 	/* Now queue it where it belongs */
1830 	if (control != NULL) {
1831 		/* First a sanity check */
1832 		if (asoc->fragmented_delivery_inprogress) {
1833 			/*
1834 			 * Ok, we have a fragmented delivery in progress. If
1835 			 * this chunk is next to deliver OR belongs, in our
1836 			 * view, in the reassembly queue, the peer is evil
1837 			 * or broken.
1838 			 */
1839 			uint32_t estimate_tsn;
1840 
1841 			estimate_tsn = asoc->tsn_last_delivered + 1;
1842 			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1843 			    (estimate_tsn == control->sinfo_tsn)) {
1844 				/* Evil/Broken peer */
1845 				sctp_m_freem(control->data);
1846 				control->data = NULL;
1847 				if (control->whoFrom) {
1848 					sctp_free_remote_addr(control->whoFrom);
1849 					control->whoFrom = NULL;
1850 				}
1851 				sctp_free_a_readq(stcb, control);
1852 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1853 				    0, M_DONTWAIT, 1, MT_DATA);
1854 				if (oper) {
1855 					struct sctp_paramhdr *ph;
1856 					uint32_t *ippp;
1857 
1858 					SCTP_BUF_LEN(oper) =
1859 					    sizeof(struct sctp_paramhdr) +
1860 					    (3 * sizeof(uint32_t));
1861 					ph = mtod(oper, struct sctp_paramhdr *);
1862 					ph->param_type =
1863 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1864 					ph->param_length = htons(SCTP_BUF_LEN(oper));
1865 					ippp = (uint32_t *) (ph + 1);
1866 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1867 					ippp++;
1868 					*ippp = tsn;
1869 					ippp++;
1870 					*ippp = ((strmno << 16) | strmseq);
1871 				}
1872 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1873 				sctp_abort_an_association(stcb->sctp_ep, stcb,
1874 				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1875 
1876 				*abort_flag = 1;
1877 				return (0);
1878 			} else {
1879 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1880 					sctp_m_freem(control->data);
1881 					control->data = NULL;
1882 					if (control->whoFrom) {
1883 						sctp_free_remote_addr(control->whoFrom);
1884 						control->whoFrom = NULL;
1885 					}
1886 					sctp_free_a_readq(stcb, control);
1887 
1888 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1889 					    0, M_DONTWAIT, 1, MT_DATA);
1890 					if (oper) {
1891 						struct sctp_paramhdr *ph;
1892 						uint32_t *ippp;
1893 
1894 						SCTP_BUF_LEN(oper) =
1895 						    sizeof(struct sctp_paramhdr) +
1896 						    (3 * sizeof(uint32_t));
1897 						ph = mtod(oper,
1898 						    struct sctp_paramhdr *);
1899 						ph->param_type =
1900 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1901 						ph->param_length =
1902 						    htons(SCTP_BUF_LEN(oper));
1903 						ippp = (uint32_t *) (ph + 1);
1904 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1905 						ippp++;
1906 						*ippp = tsn;
1907 						ippp++;
1908 						*ippp = ((strmno << 16) | strmseq);
1909 					}
1910 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1911 					sctp_abort_an_association(stcb->sctp_ep,
1912 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1913 
1914 					*abort_flag = 1;
1915 					return (0);
1916 				}
1917 			}
1918 		} else {
1919 			/* No PDAPI running */
1920 			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1921 				/*
1922 				 * Reassembly queue is NOT empty; validate
1923 				 * that this tsn does not need to be in the
1924 				 * reassembly queue. If it does then our peer
1925 				 * is broken or evil.
1926 				 */
1927 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1928 					sctp_m_freem(control->data);
1929 					control->data = NULL;
1930 					if (control->whoFrom) {
1931 						sctp_free_remote_addr(control->whoFrom);
1932 						control->whoFrom = NULL;
1933 					}
1934 					sctp_free_a_readq(stcb, control);
1935 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1936 					    0, M_DONTWAIT, 1, MT_DATA);
1937 					if (oper) {
1938 						struct sctp_paramhdr *ph;
1939 						uint32_t *ippp;
1940 
1941 						SCTP_BUF_LEN(oper) =
1942 						    sizeof(struct sctp_paramhdr) +
1943 						    (3 * sizeof(uint32_t));
1944 						ph = mtod(oper,
1945 						    struct sctp_paramhdr *);
1946 						ph->param_type =
1947 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1948 						ph->param_length =
1949 						    htons(SCTP_BUF_LEN(oper));
1950 						ippp = (uint32_t *) (ph + 1);
1951 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
1952 						ippp++;
1953 						*ippp = tsn;
1954 						ippp++;
1955 						*ippp = ((strmno << 16) | strmseq);
1956 					}
1957 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1958 					sctp_abort_an_association(stcb->sctp_ep,
1959 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1960 
1961 					*abort_flag = 1;
1962 					return (0);
1963 				}
1964 			}
1965 		}
1966 		/* ok, if we reach here we have passed the sanity checks */
1967 		if (chunk_flags & SCTP_DATA_UNORDERED) {
1968 			/* queue directly into socket buffer */
1969 			sctp_add_to_readq(stcb->sctp_ep, stcb,
1970 			    control,
1971 			    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
1972 		} else {
1973 			/*
1974 			 * Special check for when streams are resetting. We
1975 			 * could be smarter about this and check the
1976 			 * actual stream to see if it is not being reset..
1977 			 * that way we would not create a HOLB when amongst
1978 			 * streams being reset and those not being reset.
1979 			 *
1980 			 * We take complete messages that have a stream reset
1981 			 * intervening (aka the TSN is after where our
1982 			 * cum-ack needs to be) off and put them on a
1983 			 * pending_reply_queue. The reassembly ones we do
1984 			 * not have to worry about since they are all sorted
1985 			 * and processed by TSN order. It is only the
1986 			 * singletons I must worry about.
1987 			 */
1988 			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1989 			    ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)))
1990 			    ) {
1991 				/*
1992 				 * yep, it's past where we need to reset... go
1993 				 * ahead and queue it.
1994 				 */
1995 				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
1996 					/* first one on */
1997 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1998 				} else {
1999 					struct sctp_queued_to_read *ctlOn;
2000 					unsigned char inserted = 0;
2001 
2002 					ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
2003 					while (ctlOn) {
2004 						if (compare_with_wrap(control->sinfo_tsn,
2005 						    ctlOn->sinfo_tsn, MAX_TSN)) {
2006 							ctlOn = TAILQ_NEXT(ctlOn, next);
2007 						} else {
2008 							/* found it */
2009 							TAILQ_INSERT_BEFORE(ctlOn, control, next);
2010 							inserted = 1;
2011 							break;
2012 						}
2013 					}
2014 					if (inserted == 0) {
2015 						/*
2016 						 * not inserted in the
2017 						 * loop above, so it
2018 						 * belongs at the tail.
2019 						 */
2020 						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2021 					}
2022 				}
2023 			} else {
2024 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2025 				if (*abort_flag) {
2026 					return (0);
2027 				}
2028 			}
2029 		}
2030 	} else {
2031 		/* Into the re-assembly queue */
2032 		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2033 		if (*abort_flag) {
2034 			/*
2035 			 * the assoc is now gone and chk was put onto the
2036 			 * reasm queue, which has all been freed.
2037 			 */
2038 			*m = NULL;
2039 			return (0);
2040 		}
2041 	}
2042 finish_express_del:
2043 	if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
2044 		/* we have a new high score */
2045 		asoc->highest_tsn_inside_map = tsn;
2046 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2047 			sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2048 		}
2049 	}
2050 	if (tsn == (asoc->cumulative_tsn + 1)) {
2051 		/* Update cum-ack */
2052 		asoc->cumulative_tsn = tsn;
2053 	}
2054 	if (last_chunk) {
2055 		*m = NULL;
2056 	}
2057 	if (ordered) {
2058 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2059 	} else {
2060 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2061 	}
2062 	SCTP_STAT_INCR(sctps_recvdata);
2063 	/* Set it present please */
2064 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2065 		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2066 	}
2067 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2068 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2069 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2070 	}
2071 	SCTP_TCB_LOCK_ASSERT(stcb);
2072 	SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2073 	/* check the special flag for stream resets */
2074 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2075 	    ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
2076 	    (asoc->cumulative_tsn == liste->tsn))
2077 	    ) {
2078 		/*
2079 		 * we have finished working through the backlogged TSNs; now
2080 		 * it is time to reset streams. 1: call reset function. 2:
2081 		 * free pending_reply space. 3: distribute any chunks in
2082 		 * pending_reply_queue.
2083 		 */
2084 		struct sctp_queued_to_read *ctl;
2085 
2086 		sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2087 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2088 		SCTP_FREE(liste, SCTP_M_STRESET);
2089 		/* sa_ignore FREED_MEMORY */
2090 		liste = TAILQ_FIRST(&asoc->resetHead);
2091 		ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2092 		if (ctl && (liste == NULL)) {
2093 			/* All can be removed */
2094 			while (ctl) {
2095 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2096 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2097 				if (*abort_flag) {
2098 					return (0);
2099 				}
2100 				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2101 			}
2102 		} else if (ctl) {
2103 			/* another stream reset is still pending */
2104 			while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
2105 				/*
2106 				 * if ctl->sinfo_tsn is <= liste->tsn we can
2107 				 * process it which is the NOT of
2108 				 * ctl->sinfo_tsn > liste->tsn
2109 				 */
2110 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2111 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2112 				if (*abort_flag) {
2113 					return (0);
2114 				}
2115 				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2116 			}
2117 		}
2118 		/*
2119 		 * Now service re-assembly to pick up anything that has been
2120 		 * held on the reassembly queue.
2121 		 */
2122 		sctp_deliver_reasm_check(stcb, asoc);
2123 		need_reasm_check = 0;
2124 	}
2125 	if (need_reasm_check) {
2126 		/* Another message waits? */
2127 		sctp_deliver_reasm_check(stcb, asoc);
2128 	}
2129 	return (1);
2130 }
2131 
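/*
 * Lookup table used when scanning the mapping array: for a byte value b,
 * sctp_map_lookup_tab[b] is the number of consecutive 1 bits starting at
 * the least significant bit, minus one (a byte with the low bit clear
 * yields -1, 0xff yields 7). The scan in sctp_sack_check() adds 8 for
 * every all-ones byte and consults this table for the first byte that
 * contains a 0 bit.
 */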
2132 int8_t sctp_map_lookup_tab[256] = {
2133 	-1, 0, -1, 1, -1, 0, -1, 2,
2134 	-1, 0, -1, 1, -1, 0, -1, 3,
2135 	-1, 0, -1, 1, -1, 0, -1, 2,
2136 	-1, 0, -1, 1, -1, 0, -1, 4,
2137 	-1, 0, -1, 1, -1, 0, -1, 2,
2138 	-1, 0, -1, 1, -1, 0, -1, 3,
2139 	-1, 0, -1, 1, -1, 0, -1, 2,
2140 	-1, 0, -1, 1, -1, 0, -1, 5,
2141 	-1, 0, -1, 1, -1, 0, -1, 2,
2142 	-1, 0, -1, 1, -1, 0, -1, 3,
2143 	-1, 0, -1, 1, -1, 0, -1, 2,
2144 	-1, 0, -1, 1, -1, 0, -1, 4,
2145 	-1, 0, -1, 1, -1, 0, -1, 2,
2146 	-1, 0, -1, 1, -1, 0, -1, 3,
2147 	-1, 0, -1, 1, -1, 0, -1, 2,
2148 	-1, 0, -1, 1, -1, 0, -1, 6,
2149 	-1, 0, -1, 1, -1, 0, -1, 2,
2150 	-1, 0, -1, 1, -1, 0, -1, 3,
2151 	-1, 0, -1, 1, -1, 0, -1, 2,
2152 	-1, 0, -1, 1, -1, 0, -1, 4,
2153 	-1, 0, -1, 1, -1, 0, -1, 2,
2154 	-1, 0, -1, 1, -1, 0, -1, 3,
2155 	-1, 0, -1, 1, -1, 0, -1, 2,
2156 	-1, 0, -1, 1, -1, 0, -1, 5,
2157 	-1, 0, -1, 1, -1, 0, -1, 2,
2158 	-1, 0, -1, 1, -1, 0, -1, 3,
2159 	-1, 0, -1, 1, -1, 0, -1, 2,
2160 	-1, 0, -1, 1, -1, 0, -1, 4,
2161 	-1, 0, -1, 1, -1, 0, -1, 2,
2162 	-1, 0, -1, 1, -1, 0, -1, 3,
2163 	-1, 0, -1, 1, -1, 0, -1, 2,
2164 	-1, 0, -1, 1, -1, 0, -1, 7,
2165 };
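
/*
 * A minimal sketch (hypothetical helper, illustration only; kept under
 * #if 0 so it is never compiled) of the computation the table above
 * caches:
 */
#if 0
static int
sctp_map_lookup_slow(uint8_t b)
{
	int n = -1;

	/* count consecutive 1 bits starting at the least significant bit */
	while (b & 0x01) {
		n++;
		b >>= 1;
	}
	return (n);		/* equals sctp_map_lookup_tab[b] */
}
#endif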
2166 
2167 
2168 void
2169 sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag)
2170 {
2171 	/*
2172 	 * Now we also need to check the mapping array in a couple of ways:
2173 	 * 1) did we move the cum-ack point? 2) can we slide the array down?
2174 	 */
2175 	struct sctp_association *asoc;
2176 	int at;
2177 	int last_all_ones = 0;
2178 	int slide_from, slide_end, lgap, distance;
2179 	uint32_t old_cumack, old_base, old_highest;
2180 	unsigned char aux_array[64];
2181 
2182 
2183 	asoc = &stcb->asoc;
2184 	at = 0;
2185 
2186 	old_cumack = asoc->cumulative_tsn;
2187 	old_base = asoc->mapping_array_base_tsn;
2188 	old_highest = asoc->highest_tsn_inside_map;
2189 	if (asoc->mapping_array_size < 64)
2190 		memcpy(aux_array, asoc->mapping_array,
2191 		    asoc->mapping_array_size);
2192 	else
2193 		memcpy(aux_array, asoc->mapping_array, 64);
2194 
2195 	/*
2196 	 * We could probably improve this a small bit by calculating the
2197 	 * offset of the current cum-ack as the starting point.
2198 	 */
2199 	at = 0;
2200 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2201 
2202 		if (asoc->mapping_array[slide_from] == 0xff) {
2203 			at += 8;
2204 			last_all_ones = 1;
2205 		} else {
2206 			/* there is a 0 bit */
2207 			at += sctp_map_lookup_tab[asoc->mapping_array[slide_from]];
2208 			last_all_ones = 0;
2209 			break;
2210 		}
2211 	}
2212 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - last_all_ones);
2213 	/* at is one off, since the table embeds a -1 */
2214 	at++;
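	/*
	 * Worked example (a sketch): with mapping_array[0] = 0xff and
	 * mapping_array[1] = 0x03 the loop above leaves at = 8 + 1 = 9 and
	 * last_all_ones = 0, so cumulative_tsn = base + 9 (TSNs base..base+9
	 * are all present). After the at++, 'at' is the count (10) of
	 * consecutively received TSNs.
	 */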
2215 
2216 	if (compare_with_wrap(asoc->cumulative_tsn,
2217 	    asoc->highest_tsn_inside_map,
2218 	    MAX_TSN)) {
2219 #ifdef INVARIANTS
2220 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2221 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2222 #else
2223 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2224 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2225 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2226 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2227 		}
2228 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2229 #endif
2230 	}
2231 	if ((asoc->cumulative_tsn == asoc->highest_tsn_inside_map) && (at >= 8)) {
2232 		/* The whole mapping array was completed by a single FR */
2233 		/* highest becomes the cum-ack */
2234 		int clr;
2235 
2236 		asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
2237 		/* clear the array */
2238 		clr = (at >> 3) + 1;
2239 		if (clr > asoc->mapping_array_size) {
2240 			clr = asoc->mapping_array_size;
2241 		}
2242 		memset(asoc->mapping_array, 0, clr);
2243 		/* base becomes one ahead of the cum-ack */
2244 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2245 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2246 			sctp_log_map(old_base, old_cumack, old_highest,
2247 			    SCTP_MAP_PREPARE_SLIDE);
2248 			sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2249 			    asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED);
2250 		}
2251 	} else if (at >= 8) {
2252 		/* we can slide the mapping array down */
2253 		/* slide_from holds where we hit the first NON 0xff byte */
2254 
2255 		/*
2256 		 * now calculate the ceiling of the move using our highest
2257 		 * TSN value
2258 		 */
2259 		if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
2260 			lgap = asoc->highest_tsn_inside_map -
2261 			    asoc->mapping_array_base_tsn;
2262 		} else {
2263 			lgap = (MAX_TSN - asoc->mapping_array_base_tsn) +
2264 			    asoc->highest_tsn_inside_map + 1;
2265 		}
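		/*
		 * Example (a sketch): base_tsn = 1000 and highest = 1100
		 * give lgap = 100, so slide_end below is 12; with
		 * slide_from = 2, bytes 2..12 are copied down to 0..10 and
		 * base_tsn advances by slide_from * 8 = 16, to 1016.
		 */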
2266 		slide_end = lgap >> 3;
2267 		if (slide_end < slide_from) {
2268 #ifdef INVARIANTS
2269 			panic("impossible slide");
2270 #else
2271 			SCTP_PRINTF("impossible slide?\n");
2272 			return;
2273 #endif
2274 		}
2275 		if (slide_end > asoc->mapping_array_size) {
2276 #ifdef INVARIANTS
2277 			panic("would overrun buffer");
2278 #else
2279 			SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2280 			    asoc->mapping_array_size, slide_end);
2281 			slide_end = asoc->mapping_array_size;
2282 #endif
2283 		}
2284 		distance = (slide_end - slide_from) + 1;
2285 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2286 			sctp_log_map(old_base, old_cumack, old_highest,
2287 			    SCTP_MAP_PREPARE_SLIDE);
2288 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2289 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2290 		}
2291 		if (distance + slide_from > asoc->mapping_array_size ||
2292 		    distance < 0) {
2293 			/*
2294 			 * Here we do NOT slide forward the array so that
2295 			 * hopefully when more data comes in to fill it up
2296 			 * we will be able to slide it forward. Really I
2297 			 * don't think this should happen :-0
2298 			 */
2299 
2300 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2301 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2302 				    (uint32_t) asoc->mapping_array_size,
2303 				    SCTP_MAP_SLIDE_NONE);
2304 			}
2305 		} else {
2306 			int ii;
2307 
2308 			for (ii = 0; ii < distance; ii++) {
2309 				asoc->mapping_array[ii] =
2310 				    asoc->mapping_array[slide_from + ii];
2311 			}
2312 			for (ii = distance; ii <= slide_end; ii++) {
2313 				asoc->mapping_array[ii] = 0;
2314 			}
2315 			asoc->mapping_array_base_tsn += (slide_from << 3);
2316 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2317 				sctp_log_map(asoc->mapping_array_base_tsn,
2318 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2319 				    SCTP_MAP_SLIDE_RESULT);
2320 			}
2321 		}
2322 	}
2323 	/*
2324 	 * Now we need to see if we need to queue a sack or just start the
2325 	 * timer (if allowed).
2326 	 */
2327 	if (ok_to_sack) {
2328 		if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2329 			/*
2330 			 * Ok, special case: in the SHUTDOWN-SENT case we
2331 			 * make sure the SACK timer is off and instead send
2332 			 * a SHUTDOWN and a SACK.
2333 			 */
2334 			if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2335 				sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2336 				    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2337 			}
2338 			sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2339 			sctp_send_sack(stcb);
2340 		} else {
2341 			int is_a_gap;
2342 
2343 			/* is there a gap now ? */
2344 			is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2345 			    stcb->asoc.cumulative_tsn, MAX_TSN);
2346 
2347 			/*
2348 			 * CMT DAC algorithm: increase number of packets
2349 			 * received since last ack
2350 			 */
2351 			stcb->asoc.cmt_dac_pkts_rcvd++;
2352 
2353 			if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2354 								 * SACK */
2355 			    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2356 								 * longer is one */
2357 			    (stcb->asoc.numduptsns) ||	/* we have dup's */
2358 			    (is_a_gap) ||	/* is still a gap */
2359 			    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2360 			    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2361 			    ) {
2362 
2363 				if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off)) &&
2364 				    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2365 				    (stcb->asoc.send_sack == 0) &&
2366 				    (stcb->asoc.numduptsns == 0) &&
2367 				    (stcb->asoc.delayed_ack) &&
2368 				    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2369 
2370 					/*
2371 					 * CMT DAC algorithm: With CMT,
2372 					 * delay acks even in the face of
2373 					 * reordering. Therefore, acks
2374 					 * that do not have to be sent
2375 					 * because of the above reasons
2376 					 * will be delayed. That is, acks
2377 					 * that would have been sent due
2378 					 * to gap reports will be delayed
2379 					 * with DAC. Start the delayed
2380 					 * ack timer.
2381 					 */
2382 					sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2383 					    stcb->sctp_ep, stcb, NULL);
2384 				} else {
2385 					/*
2386 					 * Ok we must build a SACK since the
2387 					 * timer is pending, we got our
2388 					 * first packet OR there are gaps or
2389 					 * duplicates.
2390 					 */
2391 					(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2392 					sctp_send_sack(stcb);
2393 				}
2394 			} else {
2395 				if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2396 					sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2397 					    stcb->sctp_ep, stcb, NULL);
2398 				}
2399 			}
2400 		}
2401 	}
2402 }
2403 
2404 void
2405 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2406 {
2407 	struct sctp_tmit_chunk *chk;
2408 	uint32_t tsize;
2409 	uint16_t nxt_todel;
2410 
2411 	if (asoc->fragmented_delivery_inprogress) {
2412 		sctp_service_reassembly(stcb, asoc);
2413 	}
2414 	/* Can we proceed further, i.e. is the PD-API complete? */
2415 	if (asoc->fragmented_delivery_inprogress) {
2416 		/* no */
2417 		return;
2418 	}
2419 	/*
2420 	 * Now is there some other chunk I can deliver from the reassembly
2421 	 * queue.
2422 	 */
2423 doit_again:
2424 	chk = TAILQ_FIRST(&asoc->reasmqueue);
2425 	if (chk == NULL) {
2426 		asoc->size_on_reasm_queue = 0;
2427 		asoc->cnt_on_reasm_queue = 0;
2428 		return;
2429 	}
2430 	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2431 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2432 	    ((nxt_todel == chk->rec.data.stream_seq) ||
2433 	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2434 		/*
2435 		 * Yep the first one is here. We setup to start reception,
2436 		 * by backing down the TSN just in case we can't deliver.
2437 		 */
2438 
2439 		/*
2440 		 * Before we start though either all of the message should
2441 		 * be here or 1/4 the socket buffer max or nothing on the
2442 		 * delivery queue and something can be delivered.
2443 		 */
2444 		if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
2445 		    (tsize >= stcb->sctp_ep->partial_delivery_point))) {
2446 			asoc->fragmented_delivery_inprogress = 1;
2447 			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2448 			asoc->str_of_pdapi = chk->rec.data.stream_number;
2449 			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2450 			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2451 			asoc->fragment_flags = chk->rec.data.rcv_flags;
2452 			sctp_service_reassembly(stcb, asoc);
2453 			if (asoc->fragmented_delivery_inprogress == 0) {
2454 				goto doit_again;
2455 			}
2456 		}
2457 	}
2458 }
2459 
2460 int
2461 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2462     struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2463     struct sctp_nets *net, uint32_t * high_tsn)
2464 {
2465 	struct sctp_data_chunk *ch, chunk_buf;
2466 	struct sctp_association *asoc;
2467 	int num_chunks = 0;	/* number of data chunks processed */
2468 	int stop_proc = 0;
2469 	int chk_length, break_flag, last_chunk;
2470 	int abort_flag = 0, was_a_gap = 0;
2471 	struct mbuf *m;
2472 
2473 	/* set the rwnd */
2474 	sctp_set_rwnd(stcb, &stcb->asoc);
2475 
2476 	m = *mm;
2477 	SCTP_TCB_LOCK_ASSERT(stcb);
2478 	asoc = &stcb->asoc;
2479 	if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2480 	    stcb->asoc.cumulative_tsn, MAX_TSN)) {
2481 		/* there was a gap before this data was processed */
2482 		was_a_gap = 1;
2483 	}
2484 	/*
2485 	 * setup where we got the last DATA packet from for any SACK that
2486 	 * may need to go out. Don't bump the net. This is done ONLY when a
2487 	 * chunk is assigned.
2488 	 */
2489 	asoc->last_data_chunk_from = net;
2490 
2491 	/*-
2492 	 * Now before we proceed we must figure out if this is a wasted
2493 	 * cluster... i.e. it is a small packet sent in and yet the driver
2494 	 * underneath allocated a full cluster for it. If so we must copy it
2495 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2496 	 * with cluster starvation. Note for __Panda__ we don't do this
2497 	 * since it has clusters all the way down to 64 bytes.
2498 	 */
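	/*
	 * For example (a sketch): a 100-byte packet that arrived in a 2k
	 * cluster is copied below into a plain mbuf sized for the data,
	 * and the cluster is freed back to its pool.
	 */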
2499 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2500 		/* we only handle mbufs that are singletons.. not chains */
2501 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2502 		if (m) {
2503 			/* ok let's see if we can copy the data up */
2504 			caddr_t *from, *to;
2505 
2506 			/* get the pointers and copy */
2507 			to = mtod(m, caddr_t *);
2508 			from = mtod((*mm), caddr_t *);
2509 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2510 			/* copy the length and free up the old */
2511 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2512 			sctp_m_freem(*mm);
2513 			/* success, back copy */
2514 			*mm = m;
2515 		} else {
2516 			/* We are in trouble in the mbuf world .. yikes */
2517 			m = *mm;
2518 		}
2519 	}
2520 	/* get pointer to the first chunk header */
2521 	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2522 	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2523 	if (ch == NULL) {
2524 		return (1);
2525 	}
2526 	/*
2527 	 * process all DATA chunks...
2528 	 */
2529 	*high_tsn = asoc->cumulative_tsn;
2530 	break_flag = 0;
2531 	asoc->data_pkts_seen++;
2532 	while (stop_proc == 0) {
2533 		/* validate chunk length */
2534 		chk_length = ntohs(ch->ch.chunk_length);
2535 		if (length - *offset < chk_length) {
2536 			/* all done, mutilated chunk */
2537 			stop_proc = 1;
2538 			break;
2539 		}
2540 		if (ch->ch.chunk_type == SCTP_DATA) {
2541 			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2542 				/*
2543 				 * Need to send an abort since we had an
2544 				 * invalid data chunk.
2545 				 */
2546 				struct mbuf *op_err;
2547 
2548 				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2549 				    0, M_DONTWAIT, 1, MT_DATA);
2550 
2551 				if (op_err) {
2552 					struct sctp_paramhdr *ph;
2553 					uint32_t *ippp;
2554 
2555 					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2556 					    (2 * sizeof(uint32_t));
2557 					ph = mtod(op_err, struct sctp_paramhdr *);
2558 					ph->param_type =
2559 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2560 					ph->param_length = htons(SCTP_BUF_LEN(op_err));
2561 					ippp = (uint32_t *) (ph + 1);
2562 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2563 					ippp++;
2564 					*ippp = asoc->cumulative_tsn;
2565 
2566 				}
2567 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2568 				sctp_abort_association(inp, stcb, m, iphlen, sh,
2569 				    op_err, 0, net->port);
2570 				return (2);
2571 			}
2572 #ifdef SCTP_AUDITING_ENABLED
2573 			sctp_audit_log(0xB1, 0);
2574 #endif
2575 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2576 				last_chunk = 1;
2577 			} else {
2578 				last_chunk = 0;
2579 			}
2580 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2581 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2582 			    last_chunk)) {
2583 				num_chunks++;
2584 			}
2585 			if (abort_flag)
2586 				return (2);
2587 
2588 			if (break_flag) {
2589 				/*
2590 				 * Set because we ran out of rwnd space and
2591 				 * have no drop-report space left.
2592 				 */
2593 				stop_proc = 1;
2594 				break;
2595 			}
2596 		} else {
2597 			/* not a data chunk in the data region */
2598 			switch (ch->ch.chunk_type) {
2599 			case SCTP_INITIATION:
2600 			case SCTP_INITIATION_ACK:
2601 			case SCTP_SELECTIVE_ACK:
2602 			case SCTP_HEARTBEAT_REQUEST:
2603 			case SCTP_HEARTBEAT_ACK:
2604 			case SCTP_ABORT_ASSOCIATION:
2605 			case SCTP_SHUTDOWN:
2606 			case SCTP_SHUTDOWN_ACK:
2607 			case SCTP_OPERATION_ERROR:
2608 			case SCTP_COOKIE_ECHO:
2609 			case SCTP_COOKIE_ACK:
2610 			case SCTP_ECN_ECHO:
2611 			case SCTP_ECN_CWR:
2612 			case SCTP_SHUTDOWN_COMPLETE:
2613 			case SCTP_AUTHENTICATION:
2614 			case SCTP_ASCONF_ACK:
2615 			case SCTP_PACKET_DROPPED:
2616 			case SCTP_STREAM_RESET:
2617 			case SCTP_FORWARD_CUM_TSN:
2618 			case SCTP_ASCONF:
2619 				/*
2620 				 * Now, what do we do with KNOWN chunks that
2621 				 * are NOT in the right place?
2622 				 *
2623 				 * For now, I do nothing but ignore them. We
2624 				 * may later want to add sysctl stuff to
2625 				 * switch out and do either an ABORT() or
2626 				 * possibly process them.
2627 				 */
2628 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2629 					struct mbuf *op_err;
2630 
2631 					op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2632 					sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
2633 					return (2);
2634 				}
2635 				break;
2636 			default:
2637 				/* unknown chunk type, use bit rules */
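				/*
				 * The upper two bits of the type (cf. RFC
				 * 4960, section 3.2) say what to do: 0x40
				 * set means report the chunk in an ERROR;
				 * 0x80 set means skip it and keep
				 * processing, while 0x80 clear means stop
				 * processing the rest of the packet.
				 */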
2638 				if (ch->ch.chunk_type & 0x40) {
2639 					/* Add an error report to the queue */
2640 					struct mbuf *merr;
2641 					struct sctp_paramhdr *phd;
2642 
2643 					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2644 					if (merr) {
2645 						phd = mtod(merr, struct sctp_paramhdr *);
2646 						/*
2647 						 * We cheat and use param
2648 						 * type since we did not
2649 						 * bother to define an error
2650 						 * cause struct. They are
2651 						 * the same basic format
2652 						 * with different names.
2653 						 */
2654 						phd->param_type =
2655 						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
2656 						phd->param_length =
2657 						    htons(chk_length + sizeof(*phd));
2658 						SCTP_BUF_LEN(merr) = sizeof(*phd);
2659 						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
2660 						    SCTP_SIZE32(chk_length),
2661 						    M_DONTWAIT);
2662 						if (SCTP_BUF_NEXT(merr)) {
2663 							sctp_queue_op_err(stcb, merr);
2664 						} else {
2665 							sctp_m_freem(merr);
2666 						}
2667 					}
2668 				}
2669 				if ((ch->ch.chunk_type & 0x80) == 0) {
2670 					/* discard the rest of this packet */
2671 					stop_proc = 1;
2672 				}	/* else skip this bad chunk and
2673 					 * continue... */
2674 				break;
2675 			}	/* end switch of chunk type */
2676 		}
2677 		*offset += SCTP_SIZE32(chk_length);
2678 		if ((*offset >= length) || stop_proc) {
2679 			/* no more data left in the mbuf chain */
2680 			stop_proc = 1;
2681 			continue;
2682 		}
2683 		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2684 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2685 		if (ch == NULL) {
2686 			*offset = length;
2687 			stop_proc = 1;
2688 			break;
2689 
2690 		}
2691 	}			/* while */
2692 	if (break_flag) {
2693 		/*
2694 		 * we need to report rwnd overrun drops.
2695 		 */
2696 		sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2697 	}
2698 	if (num_chunks) {
2699 		/*
2700 		 * Did we get data? If so, update the time for auto-close
2701 		 * and give the peer credit for being alive.
2702 		 */
2703 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2704 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2705 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2706 			    stcb->asoc.overall_error_count,
2707 			    0,
2708 			    SCTP_FROM_SCTP_INDATA,
2709 			    __LINE__);
2710 		}
2711 		stcb->asoc.overall_error_count = 0;
2712 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2713 	}
2714 	/* now service all of the reassm queue if needed */
2715 	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2716 		sctp_service_queues(stcb, asoc);
2717 
2718 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2719 		/* Assure that we ack right away */
2720 		stcb->asoc.send_sack = 1;
2721 	}
2722 	/* Start a sack timer or QUEUE a SACK for sending */
2723 	if ((stcb->asoc.cumulative_tsn == stcb->asoc.highest_tsn_inside_map) &&
2724 	    (stcb->asoc.mapping_array[0] != 0xff)) {
2725 		if ((stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) ||
2726 		    (stcb->asoc.delayed_ack == 0) ||
2727 		    (stcb->asoc.numduptsns) ||
2728 		    (stcb->asoc.send_sack == 1)) {
2729 			if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2730 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2731 			}
2732 			sctp_send_sack(stcb);
2733 		} else {
2734 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2735 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2736 				    stcb->sctp_ep, stcb, NULL);
2737 			}
2738 		}
2739 	} else {
2740 		sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
2741 	}
2742 	if (abort_flag)
2743 		return (2);
2744 
2745 	return (0);
2746 }
2747 
2748 static void
2749 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
2750     struct sctp_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
2751     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
2752     int num_seg, int *ecn_seg_sums)
2753 {
2754 	/************************************************/
2755 	/* process fragments and update sendqueue        */
2756 	/************************************************/
2757 	struct sctp_sack *sack;
2758 	struct sctp_gap_ack_block *frag, block;
2759 	struct sctp_tmit_chunk *tp1;
2760 	int i;
2761 	unsigned int j;
2762 	int num_frs = 0;
2763 
2764 	uint16_t frag_strt, frag_end, primary_flag_set;
2765 	u_long last_frag_high;
2766 
2767 	/*
2768 	 * @@@ JRI : TODO: This flag is not used anywhere .. remove?
2769 	 */
2770 	if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
2771 		primary_flag_set = 1;
2772 	} else {
2773 		primary_flag_set = 0;
2774 	}
2775 	sack = &ch->sack;
2776 
2777 	frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
2778 	    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
2779 	*offset += sizeof(block);
2780 	if (frag == NULL) {
2781 		return;
2782 	}
2783 	tp1 = NULL;
2784 	last_frag_high = 0;
2785 	for (i = 0; i < num_seg; i++) {
2786 		frag_strt = ntohs(frag->start);
2787 		frag_end = ntohs(frag->end);
2788 		/* some sanity checks on the fragment offsets */
2789 		if (frag_strt > frag_end) {
2790 			/* this one is malformed, skip */
2791 			frag++;
2792 			continue;
2793 		}
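		/*
		 * Gap ack blocks are offsets from the cumulative ack in the
		 * SACK. For example (a sketch): with last_tsn = 1000 and a
		 * block of start = 2, end = 3, the peer is acknowledging
		 * TSNs 1002 and 1003 above the cum-ack.
		 */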
2794 		if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
2795 		    MAX_TSN))
2796 			*biggest_tsn_acked = frag_end + last_tsn;
2797 
2798 		/* mark acked dgs and find out the highest TSN being acked */
2799 		if (tp1 == NULL) {
2800 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
2801 
2802 			/* save the locations of the last frags */
2803 			last_frag_high = frag_end + last_tsn;
2804 		} else {
2805 			/*
2806 			 * now let's see if we need to reset the queue due to
2807 			 * an out-of-order SACK fragment
2808 			 */
2809 			if (compare_with_wrap(frag_strt + last_tsn,
2810 			    last_frag_high, MAX_TSN)) {
2811 				/*
2812 				 * if the new frag starts after the last TSN
2813 				 * frag covered, we are ok and this one is
2814 				 * beyond the last one
2815 				 */
2816 				;
2817 			} else {
2818 				/*
2819 				 * ok, they have reset us, so we need to
2820 				 * reset the queue. This will cause extra
2821 				 * hunting, but hey, they chose the
2822 				 * performance hit when they failed to order
2823 				 * their gaps..
2824 				 */
2825 				tp1 = TAILQ_FIRST(&asoc->sent_queue);
2826 			}
2827 			last_frag_high = frag_end + last_tsn;
2828 		}
2829 		for (j = frag_strt + last_tsn; (compare_with_wrap((frag_end + last_tsn), j, MAX_TSN)); j++) {
2830 			while (tp1) {
2831 				if (tp1->rec.data.doing_fast_retransmit)
2832 					num_frs++;
2833 
2834 				/*
2835 				 * CMT: CUCv2 algorithm. For each TSN being
2836 				 * processed from the sent queue, track the
2837 				 * next expected pseudo-cumack, or
2838 				 * rtx_pseudo_cumack, if required. Separate
2839 				 * cumack trackers for first transmissions,
2840 				 * and retransmissions.
2841 				 */
2842 				if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2843 				    (tp1->snd_count == 1)) {
2844 					tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2845 					tp1->whoTo->find_pseudo_cumack = 0;
2846 				}
2847 				if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2848 				    (tp1->snd_count > 1)) {
2849 					tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2850 					tp1->whoTo->find_rtx_pseudo_cumack = 0;
2851 				}
2852 				if (tp1->rec.data.TSN_seq == j) {
2853 					if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2854 						/*
2855 						 * must be held until
2856 						 * cum-ack passes
2857 						 */
2858 						/*
2859 						 * ECN Nonce: Add the nonce
2860 						 * value to the sender's
2861 						 * nonce sum
2862 						 */
2863 						if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2864 							/*-
2865 							 * If it is less than RESEND, it is
2866 							 * now no-longer in flight.
2867 							 * Higher values may already be set
2868 							 * via previous Gap Ack Blocks...
2869 							 * i.e. ACKED or RESEND.
2870 							 */
2871 							if (compare_with_wrap(tp1->rec.data.TSN_seq,
2872 							    *biggest_newly_acked_tsn, MAX_TSN)) {
2873 								*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2874 							}
2875 							/*
2876 							 * CMT: SFR algo
2877 							 * (and HTNA) - set
2878 							 * saw_newack to 1
2879 							 * for dest being
2880 							 * newly acked.
2881 							 * Update
2882 							 * this_sack_highest_newack
2883 							 * if appropriate.
2884 							 */
2886 							if (tp1->rec.data.chunk_was_revoked == 0)
2887 								tp1->whoTo->saw_newack = 1;
2888 
2889 							if (compare_with_wrap(tp1->rec.data.TSN_seq,
2890 							    tp1->whoTo->this_sack_highest_newack,
2891 							    MAX_TSN)) {
2892 								tp1->whoTo->this_sack_highest_newack =
2893 								    tp1->rec.data.TSN_seq;
2894 							}
2895 							/*
2896 							 * CMT DAC algo:
2897 							 * also update
2898 							 * this_sack_lowest_newack
2899 							 */
2901 							if (*this_sack_lowest_newack == 0) {
2902 								if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2903 									sctp_log_sack(*this_sack_lowest_newack,
2904 									    last_tsn,
2905 									    tp1->rec.data.TSN_seq,
2906 									    0,
2907 									    0,
2908 									    SCTP_LOG_TSN_ACKED);
2909 								}
2910 								*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2911 							}
2912 							/*
2913 							 * CMT: CUCv2 algorithm.
2914 							 * If the (rtx-)pseudo-cumack
2915 							 * for the corresponding
2916 							 * dest is being acked,
2917 							 * then we have a new
2918 							 * (rtx-)pseudo-cumack. Set
2919 							 * new_(rtx_)pseudo_cumack
2920 							 * to TRUE so that the
2921 							 * cwnd for this dest can
2922 							 * be updated. Also trigger
2923 							 * the search for the next
2924 							 * expected (rtx-)pseudo-cumack.
2925 							 * Separate pseudo_cumack
2926 							 * trackers for first
2927 							 * transmissions and
2928 							 * retransmissions.
2929 							 */
2938 							if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2939 								if (tp1->rec.data.chunk_was_revoked == 0) {
2940 									tp1->whoTo->new_pseudo_cumack = 1;
2941 								}
2942 								tp1->whoTo->find_pseudo_cumack = 1;
2943 							}
2944 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2945 								sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2946 							}
2947 							if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2948 								if (tp1->rec.data.chunk_was_revoked == 0) {
2949 									tp1->whoTo->new_pseudo_cumack = 1;
2950 								}
2951 								tp1->whoTo->find_rtx_pseudo_cumack = 1;
2952 							}
2953 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2954 								sctp_log_sack(*biggest_newly_acked_tsn,
2955 								    last_tsn,
2956 								    tp1->rec.data.TSN_seq,
2957 								    frag_strt,
2958 								    frag_end,
2959 								    SCTP_LOG_TSN_ACKED);
2960 							}
2961 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2962 								sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2963 								    tp1->whoTo->flight_size,
2964 								    tp1->book_size,
2965 								    (uintptr_t) tp1->whoTo,
2966 								    tp1->rec.data.TSN_seq);
2967 							}
2968 							sctp_flight_size_decrease(tp1);
2969 							sctp_total_flight_decrease(stcb, tp1);
2970 
2971 							tp1->whoTo->net_ack += tp1->send_size;
2972 							if (tp1->snd_count < 2) {
2973 								/*
2974 								 * True
2975 								 * non-retransmitted
2976 								 * chunk
2977 								 */
2978 								tp1->whoTo->net_ack2 += tp1->send_size;
2979 
2980 								/*
2981 								 * update RTO
2982 								 * too? */
2983 								if (tp1->do_rtt) {
2984 									tp1->whoTo->RTO =
2985 									    sctp_calculate_rto(stcb,
2986 									    asoc,
2987 									    tp1->whoTo,
2988 									    &tp1->sent_rcv_time,
2989 									    sctp_align_safe_nocopy);
2990 									tp1->do_rtt = 0;
2991 								}
2992 							}
2993 						}
2994 						if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2995 							(*ecn_seg_sums) += tp1->rec.data.ect_nonce;
2996 							(*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
2997 							if (compare_with_wrap(tp1->rec.data.TSN_seq,
2998 							    asoc->this_sack_highest_gap,
2999 							    MAX_TSN)) {
3000 								asoc->this_sack_highest_gap =
3001 								    tp1->rec.data.TSN_seq;
3002 							}
3003 							if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3004 								sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3005 #ifdef SCTP_AUDITING_ENABLED
3006 								sctp_audit_log(0xB2,
3007 								    (asoc->sent_queue_retran_cnt & 0x000000ff));
3008 #endif
3009 							}
3010 						}
3011 						/*
3012 						 * All chunks NOT UNSENT
3013 						 * fall through here and are
3014 						 * marked
3015 						 */
3016 						tp1->sent = SCTP_DATAGRAM_MARKED;
3017 						if (tp1->rec.data.chunk_was_revoked) {
3018 							/* deflate the cwnd */
3019 							tp1->whoTo->cwnd -= tp1->book_size;
3020 							tp1->rec.data.chunk_was_revoked = 0;
3021 						}
3022 					}
3023 					break;
3024 				}	/* if (tp1->TSN_seq == j) */
3025 				if (compare_with_wrap(tp1->rec.data.TSN_seq, j,
3026 				    MAX_TSN))
3027 					break;
3028 
3029 				tp1 = TAILQ_NEXT(tp1, sctp_next);
3030 			}	/* end while (tp1) */
3031 		}		/* end for (j = fragStart */
3032 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3033 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3034 		*offset += sizeof(block);
3035 		if (frag == NULL) {
3036 			break;
3037 		}
3038 	}
3039 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3040 		if (num_frs)
3041 			sctp_log_fr(*biggest_tsn_acked,
3042 			    *biggest_newly_acked_tsn,
3043 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3044 	}
3045 }
3046 
3047 static void
3048 sctp_check_for_revoked(struct sctp_tcb *stcb,
3049     struct sctp_association *asoc, uint32_t cumack,
3050     u_long biggest_tsn_acked)
3051 {
3052 	struct sctp_tmit_chunk *tp1;
3053 	int tot_revoked = 0;
3054 
3055 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3056 	while (tp1) {
3057 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
3058 		    MAX_TSN)) {
3059 			/*
3060 			 * ok this guy is either ACKED or MARKED. If it is
3061 			 * ACKED it has been previously acked but not this
3062 			 * time, i.e. revoked.  If it is MARKED it was ACK'ed
3063 			 * again.
3064 			 */
3065 			if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3066 			    MAX_TSN))
3067 				break;
3068 
3069 
3070 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3071 				/* it has been revoked */
3072 				tp1->sent = SCTP_DATAGRAM_SENT;
3073 				tp1->rec.data.chunk_was_revoked = 1;
3074 				/*
3075 				 * We must add this stuff back in to assure
3076 				 * timers and such get started.
3077 				 */
3078 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3079 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3080 					    tp1->whoTo->flight_size,
3081 					    tp1->book_size,
3082 					    (uintptr_t) tp1->whoTo,
3083 					    tp1->rec.data.TSN_seq);
3084 				}
3085 				sctp_flight_size_increase(tp1);
3086 				sctp_total_flight_increase(stcb, tp1);
3087 				/*
3088 				 * We inflate the cwnd to compensate for our
3089 				 * artificial inflation of the flight_size.
3090 				 */
3091 				tp1->whoTo->cwnd += tp1->book_size;
3092 				tot_revoked++;
3093 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3094 					sctp_log_sack(asoc->last_acked_seq,
3095 					    cumack,
3096 					    tp1->rec.data.TSN_seq,
3097 					    0,
3098 					    0,
3099 					    SCTP_LOG_TSN_REVOKED);
3100 				}
3101 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3102 				/* it has been re-acked in this SACK */
3103 				tp1->sent = SCTP_DATAGRAM_ACKED;
3104 			}
3105 		}
3106 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3107 			break;
3108 		tp1 = TAILQ_NEXT(tp1, sctp_next);
3109 	}
3110 	if (tot_revoked > 0) {
3111 		/*
3112 		 * Setup the ecn nonce re-sync point. We do this since once
3113 		 * data is revoked we begin to retransmit things, which do
3114 		 * NOT have the ECN bits set. This means we are now out of
3115 		 * sync and must wait until we get back in sync with the
3116 		 * peer to check ECN bits.
3117 		 */
3118 		tp1 = TAILQ_FIRST(&asoc->send_queue);
3119 		if (tp1 == NULL) {
3120 			asoc->nonce_resync_tsn = asoc->sending_seq;
3121 		} else {
3122 			asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
3123 		}
3124 		asoc->nonce_wait_for_ecne = 0;
3125 		asoc->nonce_sum_check = 0;
3126 	}
3127 }
3128 
3129 static void
3130 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3131     u_long biggest_tsn_acked, u_long biggest_tsn_newly_acked, u_long this_sack_lowest_newack, int accum_moved)
3132 {
3133 	struct sctp_tmit_chunk *tp1;
3134 	int strike_flag = 0;
3135 	struct timeval now;
3136 	int tot_retrans = 0;
3137 	uint32_t sending_seq;
3138 	struct sctp_nets *net;
3139 	int num_dests_sacked = 0;
3140 
3141 	/*
3142 	 * select the sending_seq, this is either the next thing ready to be
3143 	 * sent but not transmitted, OR, the next seq we assign.
3144 	 */
3145 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3146 	if (tp1 == NULL) {
3147 		sending_seq = asoc->sending_seq;
3148 	} else {
3149 		sending_seq = tp1->rec.data.TSN_seq;
3150 	}
3151 
3152 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3153 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3154 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3155 			if (net->saw_newack)
3156 				num_dests_sacked++;
3157 		}
3158 	}
3159 	if (stcb->asoc.peer_supports_prsctp) {
3160 		(void)SCTP_GETTIME_TIMEVAL(&now);
3161 	}
3162 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3163 	while (tp1) {
3164 		strike_flag = 0;
3165 		if (tp1->no_fr_allowed) {
3166 			/* this one had a timeout or something */
3167 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3168 			continue;
3169 		}
3170 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3171 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3172 				sctp_log_fr(biggest_tsn_newly_acked,
3173 				    tp1->rec.data.TSN_seq,
3174 				    tp1->sent,
3175 				    SCTP_FR_LOG_CHECK_STRIKE);
3176 		}
3177 		if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3178 		    MAX_TSN) ||
3179 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3180 			/* done */
3181 			break;
3182 		}
3183 		if (stcb->asoc.peer_supports_prsctp) {
3184 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3185 				/* Is it expired? */
3186 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3189 					/* Yes so drop it */
3190 					if (tp1->data != NULL) {
3191 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3192 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3193 						    &asoc->sent_queue, SCTP_SO_NOT_LOCKED);
3194 					}
3195 					tp1 = TAILQ_NEXT(tp1, sctp_next);
3196 					continue;
3197 				}
3198 			}
3199 			if ((PR_SCTP_RTX_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3200 				/* Has it been retransmitted tv_sec times? */
3201 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3202 					/* Yes, so drop it */
3203 					if (tp1->data != NULL) {
3204 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3205 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3206 						    &asoc->sent_queue, SCTP_SO_NOT_LOCKED);
3207 					}
3208 					tp1 = TAILQ_NEXT(tp1, sctp_next);
3209 					continue;
3210 				}
3211 			}
3212 		}
3213 		if (compare_with_wrap(tp1->rec.data.TSN_seq,
3214 		    asoc->this_sack_highest_gap, MAX_TSN)) {
3215 			/* we are beyond the tsn in the sack  */
3216 			break;
3217 		}
3218 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3219 			/* either a RESEND, ACKED, or MARKED */
3220 			/* skip */
3221 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3222 			continue;
3223 		}
3224 		/*
3225 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3226 		 */
3227 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3228 			/*
3229 			 * No new acks were received for data sent to this
3230 			 * dest. Therefore, according to the SFR algo for
3231 			 * CMT, no data sent to this dest can be marked for
3232 			 * FR using this SACK.
3233 			 */
3234 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3235 			continue;
3236 		} else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq,
3237 		    tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
3238 			/*
3239 			 * CMT: New acks were received for data sent to
3240 			 * this dest. But no new acks were seen for data
3241 			 * sent after tp1. Therefore, according to the SFR
3242 			 * algo for CMT, tp1 cannot be marked for FR using
3243 			 * this SACK. This step covers part of the DAC algo
3244 			 * and the HTNA algo as well.
3245 			 */
3246 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3247 			continue;
3248 		}
3249 		/*
3250 		 * Here we check to see if we have already done a FR
3251 		 * and if so we see if the biggest TSN we saw in the sack is
3252 		 * smaller than the recovery point. If so we don't strike
3253 		 * the tsn... otherwise we CAN strike the TSN.
3254 		 */
3255 		/*
3256 		 * @@@ JRI: Check for CMT if (accum_moved &&
3257 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3258 		 * 0)) {
3259 		 */
3260 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3261 			/*
3262 			 * Strike the TSN if in fast-recovery and cum-ack
3263 			 * moved.
3264 			 */
3265 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3266 				sctp_log_fr(biggest_tsn_newly_acked,
3267 				    tp1->rec.data.TSN_seq,
3268 				    tp1->sent,
3269 				    SCTP_FR_LOG_STRIKE_CHUNK);
3270 			}
3271 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3272 				tp1->sent++;
3273 			}
3274 			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3275 				/*
3276 				 * CMT DAC algorithm: If SACK flag is set to
3277 				 * 0, then lowest_newack test will not pass
3278 				 * because it would have been set to the
3279 				 * cumack earlier. If not already marked to
3280 				 * be rtx'd, if not a mixed sack, and if tp1
3281 				 * is not between two sacked TSNs, then mark by
3282 				 * one more. NOTE that we are marking by one
3283 				 * additional time since the SACK DAC flag
3284 				 * indicates that two packets have been
3285 				 * received after this missing TSN.
3286 				 */
3287 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3288 				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3289 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3290 						sctp_log_fr(16 + num_dests_sacked,
3291 						    tp1->rec.data.TSN_seq,
3292 						    tp1->sent,
3293 						    SCTP_FR_LOG_STRIKE_CHUNK);
3294 					}
3295 					tp1->sent++;
3296 				}
3297 			}
3298 		} else if ((tp1->rec.data.doing_fast_retransmit) && (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
3299 			/*
3300 			 * For those that have done a FR we must take
3301 			 * special consideration if we strike. I.e the
3302 			 * biggest_newly_acked must be higher than the
3303 			 * sending_seq at the time we did the FR.
3304 			 */
3305 			if (
3306 #ifdef SCTP_FR_TO_ALTERNATE
3307 			/*
3308 			 * If FR's go to new networks, then we must only do
3309 			 * this for singly homed asoc's. However if the FR's
3310 			 * go to the same network (Armando's work) then it's
3311 			 * ok to FR multiple times.
3312 			 */
3313 			    (asoc->numnets < 2)
3314 #else
3315 			    (1)
3316 #endif
3317 			    ) {
3318 
3319 				if ((compare_with_wrap(biggest_tsn_newly_acked,
3320 				    tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3321 				    (biggest_tsn_newly_acked ==
3322 				    tp1->rec.data.fast_retran_tsn)) {
3323 					/*
3324 					 * Strike the TSN, since this ack is
3325 					 * beyond where things were when we
3326 					 * did a FR.
3327 					 */
3328 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3329 						sctp_log_fr(biggest_tsn_newly_acked,
3330 						    tp1->rec.data.TSN_seq,
3331 						    tp1->sent,
3332 						    SCTP_FR_LOG_STRIKE_CHUNK);
3333 					}
3334 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3335 						tp1->sent++;
3336 					}
3337 					strike_flag = 1;
3338 					if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3339 						/*
3340 						 * CMT DAC algorithm: If
3341 						 * SACK flag is set to 0,
3342 						 * then lowest_newack test
3343 						 * will not pass because it
3344 						 * would have been set to
3345 						 * the cumack earlier. If
3346 						 * not already marked to be
3347 						 * rtx'd, if not a mixed sack,
3348 						 * and if tp1 is not between
3349 						 * two sacked TSNs, then mark by
3350 						 * one more. NOTE that we
3351 						 * are marking by one
3352 						 * additional time since the
3353 						 * SACK DAC flag indicates
3354 						 * that two packets have
3355 						 * been received after this
3356 						 * missing TSN.
3357 						 */
3358 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3359 						    (num_dests_sacked == 1) &&
3360 						    compare_with_wrap(this_sack_lowest_newack,
3361 						    tp1->rec.data.TSN_seq, MAX_TSN)) {
3362 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3363 								sctp_log_fr(32 + num_dests_sacked,
3364 								    tp1->rec.data.TSN_seq,
3365 								    tp1->sent,
3366 								    SCTP_FR_LOG_STRIKE_CHUNK);
3367 							}
3368 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3369 								tp1->sent++;
3370 							}
3371 						}
3372 					}
3373 				}
3374 			}
3375 			/*
3376 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3377 			 * algo covers HTNA.
3378 			 */
3379 		} else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3380 		    biggest_tsn_newly_acked, MAX_TSN)) {
3381 			/*
3382 			 * We don't strike these: this is the HTNA
3383 			 * algorithm, i.e. we don't strike if our TSN is
3384 			 * larger than the Highest TSN Newly Acked.
3385 			 */
3386 			;
3387 		} else {
3388 			/* Strike the TSN */
3389 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3390 				sctp_log_fr(biggest_tsn_newly_acked,
3391 				    tp1->rec.data.TSN_seq,
3392 				    tp1->sent,
3393 				    SCTP_FR_LOG_STRIKE_CHUNK);
3394 			}
3395 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3396 				tp1->sent++;
3397 			}
3398 			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3399 				/*
3400 				 * CMT DAC algorithm: If SACK flag is set to
3401 				 * 0, then lowest_newack test will not pass
3402 				 * because it would have been set to the
3403 				 * cumack earlier. If not already marked to
3404 				 * be rtx'd, if not a mixed sack, and if tp1
3405 				 * is not between two sacked TSNs, then mark by
3406 				 * one more. NOTE that we are marking by one
3407 				 * additional time since the SACK DAC flag
3408 				 * indicates that two packets have been
3409 				 * received after this missing TSN.
3410 				 */
3411 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3412 				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3413 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3414 						sctp_log_fr(48 + num_dests_sacked,
3415 						    tp1->rec.data.TSN_seq,
3416 						    tp1->sent,
3417 						    SCTP_FR_LOG_STRIKE_CHUNK);
3418 					}
3419 					tp1->sent++;
3420 				}
3421 			}
3422 		}
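		/*
		 * A chunk struck often enough has advanced to
		 * SCTP_DATAGRAM_RESEND and is picked up here for fast
		 * retransmission.
		 */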
3423 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3424 			/* Increment the count to resend */
3425 			struct sctp_nets *alt;
3426 
3427 			/* printf("OK, we are now ready to FR this guy\n"); */
3428 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3429 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3430 				    0, SCTP_FR_MARKED);
3431 			}
3432 			if (strike_flag) {
3433 				/* This is a subsequent FR */
3434 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3435 			}
3436 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3437 			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
3438 				/*
3439 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3440 				 * If CMT is being used, then pick dest with
3441 				 * largest ssthresh for any retransmission.
3442 				 */
3443 				tp1->no_fr_allowed = 1;
3444 				alt = tp1->whoTo;
3445 				/* sa_ignore NO_NULL_CHK */
3446 				if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
3447 					/*
3448 					 * JRS 5/18/07 - If CMT PF is on,
3449 					 * use the PF version of
3450 					 * find_alt_net()
3451 					 */
3452 					alt = sctp_find_alternate_net(stcb, alt, 2);
3453 				} else {
3454 					/*
3455 					 * JRS 5/18/07 - If only CMT is on,
3456 					 * use the CMT version of
3457 					 * find_alt_net()
3458 					 */
3459 					/* sa_ignore NO_NULL_CHK */
3460 					alt = sctp_find_alternate_net(stcb, alt, 1);
3461 				}
3462 				if (alt == NULL) {
3463 					alt = tp1->whoTo;
3464 				}
3465 				/*
3466 				 * CUCv2: If a different dest is picked for
3467 				 * the retransmission, then new
3468 				 * (rtx-)pseudo_cumack needs to be tracked
3469 				 * for orig dest. Let CUCv2 track new (rtx-)
3470 				 * pseudo-cumack always.
3471 				 */
3472 				if (tp1->whoTo) {
3473 					tp1->whoTo->find_pseudo_cumack = 1;
3474 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3475 				}
3476 			} else {/* CMT is OFF */
3477 
3478 #ifdef SCTP_FR_TO_ALTERNATE
3479 				/* Can we find an alternate? */
3480 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3481 #else
3482 				/*
3483 				 * default behavior is to NOT retransmit
3484 				 * FR's to an alternate. Armando Caro's
3485 				 * paper details why.
3486 				 */
3487 				alt = tp1->whoTo;
3488 #endif
3489 			}
3490 
3491 			tp1->rec.data.doing_fast_retransmit = 1;
3492 			tot_retrans++;
3493 			/* mark the sending seq for possible subsequent FR's */
3494 			/*
3495 			 * printf("Marking TSN for FR new value %x\n",
3496 			 * (uint32_t)tpi->rec.data.TSN_seq);
3497 			 */
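			/*
			 * fast_retran_tsn records where sending stood at FR
			 * time; a later FR of this chunk is only allowed
			 * once the peer newly acks at or beyond this point
			 * (see the fast_retran_tsn check earlier in this
			 * loop).
			 */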
3498 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3499 				/*
3500 			 * If the send queue is empty then sending_seq
3501 			 * is the next sequence number that will be
3502 			 * assigned, i.e. one past the last TSN we
3503 			 * actually sent.
3504 				 */
3505 				tp1->rec.data.fast_retran_tsn = sending_seq;
3506 			} else {
3507 				/*
3508 			 * If there are chunks on the send queue
3509 			 * (unsent data that has made it from the
3510 			 * stream queues but not out the door), we
3511 			 * take the first one, which will have the
3512 			 * lowest TSN, i.e. one past the last TSN we
3513 			 * actually sent.
3514 				 */
3515 				struct sctp_tmit_chunk *ttt;
3516 
3517 				ttt = TAILQ_FIRST(&asoc->send_queue);
3518 				tp1->rec.data.fast_retran_tsn =
3519 				    ttt->rec.data.TSN_seq;
3520 			}
3521 
3522 			if (tp1->do_rtt) {
3523 				/*
3524 				 * this guy had an RTO calculation pending on
3525 				 * it, cancel it
3526 				 */
3527 				tp1->do_rtt = 0;
3528 			}
3529 			/* fix counts and things */
3530 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3531 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3532 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3533 				    tp1->book_size,
3534 				    (uintptr_t) tp1->whoTo,
3535 				    tp1->rec.data.TSN_seq);
3536 			}
3537 			if (tp1->whoTo) {
3538 				tp1->whoTo->net_ack++;
3539 				sctp_flight_size_decrease(tp1);
3540 			}
3541 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3542 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3543 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3544 			}
3545 			/* add back to the rwnd */
3546 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3547 
3548 			/* remove from the total flight */
3549 			sctp_total_flight_decrease(stcb, tp1);
3550 			if (alt != tp1->whoTo) {
3551 				/* yes, there is an alternate. */
3552 				sctp_free_remote_addr(tp1->whoTo);
3553 				/* sa_ignore FREED_MEMORY */
3554 				tp1->whoTo = alt;
3555 				atomic_add_int(&alt->ref_count, 1);
3556 			}
3557 		}
3558 		tp1 = TAILQ_NEXT(tp1, sctp_next);
3559 	}			/* while (tp1) */
3560 
3561 	if (tot_retrans > 0) {
3562 		/*
3563 		 * Setup the ecn nonce re-sync point. We do this since once
3564 		 * we FR something, we introduce a Karn's rule scenario
3565 		 * and won't know the totals for the ECN bits.
3566 		 */
3567 		asoc->nonce_resync_tsn = sending_seq;
3568 		asoc->nonce_wait_for_ecne = 0;
3569 		asoc->nonce_sum_check = 0;
3570 	}
3571 }
3572 
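/*
 * PR-SCTP: walk the front of the sent_queue and advance the association's
 * advanced_peer_ack_point over chunks that are marked to be skipped, so
 * that a FORWARD-TSN can be built. Returns the last chunk advanced over,
 * or NULL if the ack point could not be moved.
 */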
3573 struct sctp_tmit_chunk *
3574 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3575     struct sctp_association *asoc)
3576 {
3577 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3578 	struct timeval now;
3579 	int now_filled = 0;
3580 
3581 	if (asoc->peer_supports_prsctp == 0) {
3582 		return (NULL);
3583 	}
3584 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3585 	while (tp1) {
3586 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3587 		    tp1->sent != SCTP_DATAGRAM_RESEND) {
3588 			/* no chance to advance, out of here */
3589 			break;
3590 		}
3591 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3592 			/*
3593 			 * We can't fwd-tsn past any that are reliable, i.e.
3594 			 * retransmitted until the asoc fails.
3595 			 */
3596 			break;
3597 		}
3598 		if (!now_filled) {
3599 			(void)SCTP_GETTIME_TIMEVAL(&now);
3600 			now_filled = 1;
3601 		}
3602 		tp2 = TAILQ_NEXT(tp1, sctp_next);
3603 		/*
3604 		 * Now we have a chunk which is marked for another
3605 		 * retransmission to a PR-stream, but has maybe run out of
3606 		 * its chances already OR has been marked to be skipped now.
3607 		 * Can we skip it if it's a resend?
3608 		 */
3609 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3610 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3611 			/*
3612 			 * Now is this one marked for resend and its time is
3613 			 * now up?
3614 			 */
3615 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3616 				/* Yes so drop it */
3617 				if (tp1->data) {
3618 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3619 					    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3620 					    &asoc->sent_queue, SCTP_SO_NOT_LOCKED);
3621 				}
3622 			} else {
3623 				/*
3624 				 * No, we are done when we hit one marked for
3625 				 * resend whose time has not expired.
3626 				 */
3627 				break;
3628 			}
3629 		}
3630 		/*
3631 		 * Ok now if this chunk is marked to be dropped, we can clean up
3632 		 * the chunk, advance our peer ack point and we can check
3633 		 * the next chunk.
3634 		 */
3635 		if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3636 			/* the advanced_peer_ack_point moves forward */
3637 			asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3638 			a_adv = tp1;
3639 			/*
3640 			 * we don't want to de-queue it here. Just wait for
3641 			 * the next peer SACK to come with a new cumTSN and
3642 			 * then the chunk will be dropped in the normal
3643 			 * fashion.
3644 			 */
3645 			if (tp1->data) {
3646 				sctp_free_bufspace(stcb, asoc, tp1, 1);
3647 				/*
3648 				 * Maybe there should be another
3649 				 * notification type
3650 				 */
3651 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3652 				    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3653 				    tp1, SCTP_SO_NOT_LOCKED);
3654 				sctp_m_freem(tp1->data);
3655 				tp1->data = NULL;
3656 				if (stcb->sctp_socket) {
3657 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3658 					struct socket *so;
3659 
3660 					so = SCTP_INP_SO(stcb->sctp_ep);
3661 					atomic_add_int(&stcb->asoc.refcnt, 1);
3662 					SCTP_TCB_UNLOCK(stcb);
3663 					SCTP_SOCKET_LOCK(so, 1);
3664 					SCTP_TCB_LOCK(stcb);
3665 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
3666 					if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3667 						/*
3668 						 * assoc was freed while we
3669 						 * were unlocked
3670 						 */
3671 						SCTP_SOCKET_UNLOCK(so, 1);
3672 						return (NULL);
3673 					}
3674 #endif
3675 					sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
3676 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3677 					SCTP_SOCKET_UNLOCK(so, 1);
3678 #endif
3679 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3680 						sctp_wakeup_log(stcb, tp1->rec.data.TSN_seq, 1, SCTP_WAKESND_FROM_FWDTSN);
3681 					}
3682 				}
3683 			}
3684 		} else {
3685 			/*
3686 			 * If it is still in RESEND we can advance no
3687 			 * further
3688 			 */
3689 			break;
3690 		}
3691 		/*
3692 		 * If we hit here we just dumped tp1, move to next tsn on
3693 		 * sent queue.
3694 		 */
3695 		tp1 = tp2;
3696 	}
3697 	return (a_adv);
3698 }
3699 
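/*
 * Consistency check on the sent_queue: when the flight-size accounting
 * appears to have drifted (see the callers), count how many chunks sit in
 * each sent state and complain if anything is unexpectedly in flight.
 */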
3700 static void
3701 sctp_fs_audit(struct sctp_association *asoc)
3702 {
3703 	struct sctp_tmit_chunk *chk;
3704 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3705 
3706 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3707 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3708 			inflight++;
3709 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3710 			resend++;
3711 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3712 			inbetween++;
3713 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3714 			above++;
3715 		} else {
3716 			acked++;
3717 		}
3718 	}
3719 
3720 	if ((inflight > 0) || (inbetween > 0)) {
3721 #ifdef INVARIANTS
3722 		panic("Flight size-express incorrect?\n");
3723 #else
3724 		SCTP_PRINTF("Flight size-express incorrect inflight:%d inbetween:%d\n",
3725 		    inflight, inbetween);
3726 #endif
3727 	}
3728 }
3729 
3730 
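/*
 * A window probe has been recovered: move the probe chunk, and any chunks
 * already marked for RESEND, off the sent_queue and back onto the
 * send_queue so they go out again as ordinary sends.
 */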
3731 static void
3732 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3733     struct sctp_association *asoc,
3734     struct sctp_nets *net,
3735     struct sctp_tmit_chunk *tp1)
3736 {
3737 	struct sctp_tmit_chunk *chk;
3738 
3739 	/* First setup this one and get it moved back */
3740 	tp1->sent = SCTP_DATAGRAM_UNSENT;
3741 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3742 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3743 		    tp1->whoTo->flight_size,
3744 		    tp1->book_size,
3745 		    (uintptr_t) tp1->whoTo,
3746 		    tp1->rec.data.TSN_seq);
3747 	}
3748 	sctp_flight_size_decrease(tp1);
3749 	sctp_total_flight_decrease(stcb, tp1);
3750 	TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3751 	TAILQ_INSERT_HEAD(&asoc->send_queue, tp1, sctp_next);
3752 	asoc->sent_queue_cnt--;
3753 	asoc->send_queue_cnt++;
3754 	/*
3755 	 * Now all guys marked for RESEND on the sent_queue must be moved
3756 	 * back too.
3757 	 */
3758 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3759 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
3760 			/* Another chunk to move */
3761 			chk->sent = SCTP_DATAGRAM_UNSENT;
3762 			/* It should not be in flight */
3763 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3764 			TAILQ_INSERT_AFTER(&asoc->send_queue, tp1, chk, sctp_next);
3765 			asoc->sent_queue_cnt--;
3766 			asoc->send_queue_cnt++;
3767 			sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3768 		}
3769 	}
3770 }
3771 
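/*
 * Express (fast-path) SACK handling, used when the SACK moves only the
 * cumulative ack and carries no gap-ack blocks, so the full machinery of
 * sctp_handle_sack() below can be skipped.
 */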
3772 void
3773 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3774     uint32_t rwnd, int nonce_sum_flag, int *abort_now)
3775 {
3776 	struct sctp_nets *net;
3777 	struct sctp_association *asoc;
3778 	struct sctp_tmit_chunk *tp1, *tp2;
3779 	uint32_t old_rwnd;
3780 	int win_probe_recovery = 0;
3781 	int win_probe_recovered = 0;
3782 	int j, done_once = 0;
3783 
3784 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3785 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3786 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3787 	}
3788 	SCTP_TCB_LOCK_ASSERT(stcb);
3789 #ifdef SCTP_ASOCLOG_OF_TSNS
3790 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3791 	stcb->asoc.cumack_log_at++;
3792 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3793 		stcb->asoc.cumack_log_at = 0;
3794 	}
3795 #endif
3796 	asoc = &stcb->asoc;
3797 	old_rwnd = asoc->peers_rwnd;
3798 	if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
3799 		/* old ack */
3800 		return;
3801 	} else if (asoc->last_acked_seq == cumack) {
3802 		/* Window update sack */
3803 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3804 		    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
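		/*
		 * Sender-side SWS (silly window syndrome) avoidance: if
		 * what remains of the peer's window is below our threshold,
		 * treat it as fully closed so we only window-probe.
		 */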
3805 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3806 			/* SWS sender side engages */
3807 			asoc->peers_rwnd = 0;
3808 		}
3809 		if (asoc->peers_rwnd > old_rwnd) {
3810 			goto again;
3811 		}
3812 		return;
3813 	}
3814 	/* First setup for CC stuff */
3815 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3816 		net->prev_cwnd = net->cwnd;
3817 		net->net_ack = 0;
3818 		net->net_ack2 = 0;
3819 
3820 		/*
3821 		 * CMT: Reset CUC and Fast recovery algo variables before
3822 		 * SACK processing
3823 		 */
3824 		net->new_pseudo_cumack = 0;
3825 		net->will_exit_fast_recovery = 0;
3826 	}
3827 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3828 		uint32_t send_s;
3829 
3830 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3831 			tp1 = TAILQ_LAST(&asoc->sent_queue,
3832 			    sctpchunk_listhead);
3833 			send_s = tp1->rec.data.TSN_seq + 1;
3834 		} else {
3835 			send_s = asoc->sending_seq;
3836 		}
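		/*
		 * The peer cannot validly ack at or beyond send_s, the
		 * first TSN we have never sent; such a SACK is hopelessly
		 * broken, so the association is aborted.
		 */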
3837 		if ((cumack == send_s) ||
3838 		    compare_with_wrap(cumack, send_s, MAX_TSN)) {
3839 #ifndef INVARIANTS
3840 			struct mbuf *oper;
3841 
3842 #endif
3843 #ifdef INVARIANTS
3844 			panic("Impossible sack 1");
3845 #else
3846 			*abort_now = 1;
3847 			/* XXX */
3848 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3849 			    0, M_DONTWAIT, 1, MT_DATA);
3850 			if (oper) {
3851 				struct sctp_paramhdr *ph;
3852 				uint32_t *ippp;
3853 
3854 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3855 				    sizeof(uint32_t);
3856 				ph = mtod(oper, struct sctp_paramhdr *);
3857 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3858 				ph->param_length = htons(SCTP_BUF_LEN(oper));
3859 				ippp = (uint32_t *) (ph + 1);
3860 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3861 			}
3862 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3863 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
3864 			return;
3865 #endif
3866 		}
3867 	}
3868 	asoc->this_sack_highest_gap = cumack;
3869 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3870 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3871 		    stcb->asoc.overall_error_count,
3872 		    0,
3873 		    SCTP_FROM_SCTP_INDATA,
3874 		    __LINE__);
3875 	}
3876 	stcb->asoc.overall_error_count = 0;
3877 	if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
3878 		/* process the new consecutive TSN first */
3879 		tp1 = TAILQ_FIRST(&asoc->sent_queue);
3880 		while (tp1) {
3881 			tp2 = TAILQ_NEXT(tp1, sctp_next);
3882 			if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
3883 			    MAX_TSN) ||
3884 			    cumack == tp1->rec.data.TSN_seq) {
3885 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3886 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3887 				}
3888 				/*
3889 				 * ECN Nonce: Add the nonce to the sender's
3890 				 * nonce sum
3891 				 */
3892 				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
3893 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3894 					/*
3895 					 * If it is less than ACKED, it is
3896 					 * now no-longer in flight. Higher
3897 					 * values may occur during marking
3898 					 */
3899 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3900 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3901 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3902 							    tp1->whoTo->flight_size,
3903 							    tp1->book_size,
3904 							    (uintptr_t) tp1->whoTo,
3905 							    tp1->rec.data.TSN_seq);
3906 						}
3907 						sctp_flight_size_decrease(tp1);
3908 						/* sa_ignore NO_NULL_CHK */
3909 						sctp_total_flight_decrease(stcb, tp1);
3910 					}
3911 					tp1->whoTo->net_ack += tp1->send_size;
3912 					if (tp1->snd_count < 2) {
3913 						/*
3914 						 * True non-retransmitted
3915 						 * chunk
3916 						 */
3917 						tp1->whoTo->net_ack2 +=
3918 						    tp1->send_size;
3919 
3920 						/* update RTO too? */
3921 						if (tp1->do_rtt) {
3922 							tp1->whoTo->RTO =
3923 							/*
3924 							 * sa_ignore
3925 							 * NO_NULL_CHK
3926 							 */
3927 							    sctp_calculate_rto(stcb,
3928 							    asoc, tp1->whoTo,
3929 							    &tp1->sent_rcv_time,
3930 							    sctp_align_safe_nocopy);
3931 							tp1->do_rtt = 0;
3932 						}
3933 					}
3934 					/*
3935 					 * CMT: CUCv2 algorithm. From the
3936 					 * cumack'd TSNs, for each TSN being
3937 					 * acked for the first time, set the
3938 					 * following variables for the
3939 					 * corresp destination.
3940 					 * new_pseudo_cumack will trigger a
3941 					 * cwnd update.
3942 					 * find_(rtx_)pseudo_cumack will
3943 					 * trigger search for the next
3944 					 * expected (rtx-)pseudo-cumack.
3945 					 */
3946 					tp1->whoTo->new_pseudo_cumack = 1;
3947 					tp1->whoTo->find_pseudo_cumack = 1;
3948 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3949 
3950 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3951 						/* sa_ignore NO_NULL_CHK */
3952 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3953 					}
3954 				}
3955 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3956 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3957 				}
3958 				if (tp1->rec.data.chunk_was_revoked) {
3959 					/* deflate the cwnd */
3960 					tp1->whoTo->cwnd -= tp1->book_size;
3961 					tp1->rec.data.chunk_was_revoked = 0;
3962 				}
3963 				tp1->sent = SCTP_DATAGRAM_ACKED;
3964 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3965 				if (tp1->data) {
3966 					/* sa_ignore NO_NULL_CHK */
3967 					sctp_free_bufspace(stcb, asoc, tp1, 1);
3968 					sctp_m_freem(tp1->data);
3969 				}
3970 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3971 					sctp_log_sack(asoc->last_acked_seq,
3972 					    cumack,
3973 					    tp1->rec.data.TSN_seq,
3974 					    0,
3975 					    0,
3976 					    SCTP_LOG_FREE_SENT);
3977 				}
3978 				tp1->data = NULL;
3979 				asoc->sent_queue_cnt--;
3980 				sctp_free_a_chunk(stcb, tp1);
3981 				tp1 = tp2;
3982 			} else {
3983 				break;
3984 			}
3985 		}
3986 
3987 	}
3988 	/* sa_ignore NO_NULL_CHK */
3989 	if (stcb->sctp_socket) {
3990 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3991 		struct socket *so;
3992 
3993 #endif
3994 
3995 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3996 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3997 			/* sa_ignore NO_NULL_CHK */
3998 			sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
3999 		}
4000 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4001 		so = SCTP_INP_SO(stcb->sctp_ep);
4002 		atomic_add_int(&stcb->asoc.refcnt, 1);
4003 		SCTP_TCB_UNLOCK(stcb);
4004 		SCTP_SOCKET_LOCK(so, 1);
4005 		SCTP_TCB_LOCK(stcb);
4006 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4007 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4008 			/* assoc was freed while we were unlocked */
4009 			SCTP_SOCKET_UNLOCK(so, 1);
4010 			return;
4011 		}
4012 #endif
4013 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4014 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4015 		SCTP_SOCKET_UNLOCK(so, 1);
4016 #endif
4017 	} else {
4018 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4019 			sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
4020 		}
4021 	}
4022 
4023 	/* JRS - Use the congestion control given in the CC module */
4024 	if (asoc->last_acked_seq != cumack)
4025 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4026 
4027 	asoc->last_acked_seq = cumack;
4028 
4029 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4030 		/* nothing left in-flight */
4031 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4032 			net->flight_size = 0;
4033 			net->partial_bytes_acked = 0;
4034 		}
4035 		asoc->total_flight = 0;
4036 		asoc->total_flight_count = 0;
4037 	}
4038 	/* Fix up the a-p-a-p for future PR-SCTP sends */
4039 	if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4040 		asoc->advanced_peer_ack_point = cumack;
4041 	}
4042 	/* ECN Nonce updates */
4043 	if (asoc->ecn_nonce_allowed) {
4044 		if (asoc->nonce_sum_check) {
4045 			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
4046 				if (asoc->nonce_wait_for_ecne == 0) {
4047 					struct sctp_tmit_chunk *lchk;
4048 
4049 					lchk = TAILQ_FIRST(&asoc->send_queue);
4050 					asoc->nonce_wait_for_ecne = 1;
4051 					if (lchk) {
4052 						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4053 					} else {
4054 						asoc->nonce_wait_tsn = asoc->sending_seq;
4055 					}
4056 				} else {
4057 					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4058 					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4059 						/*
4060 						 * Misbehaving peer. We need
4061 						 * to react to this guy
4062 						 */
4063 						asoc->ecn_allowed = 0;
4064 						asoc->ecn_nonce_allowed = 0;
4065 					}
4066 				}
4067 			}
4068 		} else {
4069 			/* See if Resynchronization Possible */
4070 			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4071 				asoc->nonce_sum_check = 1;
4072 				/*
4073 				 * Now we must calculate what the base is:
4074 				 * we know the totals for all segments
4075 				 * gap-acked in this SACK (none), and we
4076 				 * know the SACK's nonce sum (nonce_sum_flag),
4077 				 * so we can build a truth table to
4078 				 * back-calculate asoc->nonce_sum_expect_base:
4079 				 *
4080 				 * SACK-flag-Value  Seg-Sums  Base
4081 				 *        0            0       0
4082 				 *        1            0       1
4083 				 *        0            1       1
4084 				 *        1            1       0
4085 				 */
4086 				asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
4087 			}
4088 		}
4089 	}
4090 	/* RWND update */
4091 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4092 	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4093 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4094 		/* SWS sender side engages */
4095 		asoc->peers_rwnd = 0;
4096 	}
4097 	if (asoc->peers_rwnd > old_rwnd) {
4098 		win_probe_recovery = 1;
4099 	}
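	/*
	 * The "again" loop below runs at most twice: if data remains queued
	 * but no T3 timer was started, the flight-size accounting is
	 * audited and rebuilt and one more pass is made (done_once guards
	 * against looping).
	 */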
4100 	/* Now assure a timer is running wherever data is still queued */
4101 again:
4102 	j = 0;
4103 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4104 		if (win_probe_recovery && (net->window_probe)) {
4105 			net->window_probe = 0;
4106 			win_probe_recovered = 1;
4107 			/*
4108 			 * Find first chunk that was used with window probe
4109 			 * and clear the sent
4110 			 */
4111 			/* sa_ignore FREED_MEMORY */
4112 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4113 				if (tp1->window_probe) {
4114 					/* move back to data send queue */
4115 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
4116 					break;
4117 				}
4118 			}
4119 		}
4120 		if (net->flight_size) {
4121 			int to_ticks;
4122 
4123 			if (net->RTO == 0) {
4124 				to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4125 			} else {
4126 				to_ticks = MSEC_TO_TICKS(net->RTO);
4127 			}
4128 			j++;
4129 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4130 			    sctp_timeout_handler, &net->rxt_timer);
4131 		} else {
4132 			if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4133 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4134 				    stcb, net,
4135 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4136 			}
4137 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4138 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4139 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4140 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4141 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4142 				}
4143 			}
4144 		}
4145 	}
4146 	if ((j == 0) &&
4147 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4148 	    (asoc->sent_queue_retran_cnt == 0) &&
4149 	    (win_probe_recovered == 0) &&
4150 	    (done_once == 0)) {
4151 		/* huh, this should not happen */
4152 		sctp_fs_audit(asoc);
4153 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4154 			net->flight_size = 0;
4155 		}
4156 		asoc->total_flight = 0;
4157 		asoc->total_flight_count = 0;
4158 		asoc->sent_queue_retran_cnt = 0;
4159 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4160 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4161 				sctp_flight_size_increase(tp1);
4162 				sctp_total_flight_increase(stcb, tp1);
4163 			} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4164 				asoc->sent_queue_retran_cnt++;
4165 			}
4166 		}
4167 		done_once = 1;
4168 		goto again;
4169 	}
4170 	/**********************************/
4171 	/* Now what about shutdown issues */
4172 	/**********************************/
4173 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4174 		/* nothing left on sendqueue.. consider done */
4175 		/* clean up */
4176 		if ((asoc->stream_queue_cnt == 1) &&
4177 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4178 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4179 		    (asoc->locked_on_sending)
4180 		    ) {
4181 			struct sctp_stream_queue_pending *sp;
4182 
4183 			/*
4184 			 * I may be in a state where we got all across.. but
4185 			 * cannot write more due to a shutdown... we abort
4186 			 * since the user did not indicate EOR in this case.
4187 			 * The sp will be cleaned during free of the asoc.
4188 			 */
4189 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4190 			    sctp_streamhead);
4191 			if ((sp) && (sp->length == 0)) {
4192 				/* Let cleanup code purge it */
4193 				if (sp->msg_is_complete) {
4194 					asoc->stream_queue_cnt--;
4195 				} else {
4196 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4197 					asoc->locked_on_sending = NULL;
4198 					asoc->stream_queue_cnt--;
4199 				}
4200 			}
4201 		}
4202 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4203 		    (asoc->stream_queue_cnt == 0)) {
4204 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4205 				/* Need to abort here */
4206 				struct mbuf *oper;
4207 
4208 		abort_out_now:
4209 				*abort_now = 1;
4210 				/* XXX */
4211 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4212 				    0, M_DONTWAIT, 1, MT_DATA);
4213 				if (oper) {
4214 					struct sctp_paramhdr *ph;
4215 					uint32_t *ippp;
4216 
4217 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4218 					    sizeof(uint32_t);
4219 					ph = mtod(oper, struct sctp_paramhdr *);
4220 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4221 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4222 					ippp = (uint32_t *) (ph + 1);
4223 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4224 				}
4225 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4226 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4227 			} else {
4228 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4229 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4230 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4231 				}
4232 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4233 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4234 				sctp_stop_timers_for_shutdown(stcb);
4235 				sctp_send_shutdown(stcb,
4236 				    stcb->asoc.primary_destination);
4237 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4238 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4239 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4240 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4241 			}
4242 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4243 		    (asoc->stream_queue_cnt == 0)) {
4244 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4245 				goto abort_out_now;
4246 			}
4247 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4248 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4249 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4250 			sctp_send_shutdown_ack(stcb,
4251 			    stcb->asoc.primary_destination);
4252 
4253 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4254 			    stcb->sctp_ep, stcb, asoc->primary_destination);
4255 		}
4256 	}
4257 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4258 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4259 		    rwnd,
4260 		    stcb->asoc.peers_rwnd,
4261 		    stcb->asoc.total_flight,
4262 		    stcb->asoc.total_output_queue_size);
4263 	}
4264 }
4265 
4266 void
4267 sctp_handle_sack(struct mbuf *m, int offset,
4268     struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
4269     struct sctp_nets *net_from, int *abort_now, int sack_len, uint32_t rwnd)
4270 {
4271 	struct sctp_association *asoc;
4272 	struct sctp_sack *sack;
4273 	struct sctp_tmit_chunk *tp1, *tp2;
4274 	uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
4275 	         this_sack_lowest_newack;
4276 	uint32_t sav_cum_ack;
4277 	uint16_t num_seg, num_dup;
4278 	uint16_t wake_him = 0;
4279 	unsigned int sack_length;
4280 	uint32_t send_s = 0;
4281 	long j;
4282 	int accum_moved = 0;
4283 	int will_exit_fast_recovery = 0;
4284 	uint32_t a_rwnd, old_rwnd;
4285 	int win_probe_recovery = 0;
4286 	int win_probe_recovered = 0;
4287 	struct sctp_nets *net = NULL;
4288 	int nonce_sum_flag, ecn_seg_sums = 0;
4289 	int done_once;
4290 	uint8_t reneged_all = 0;
4291 	uint8_t cmt_dac_flag;
4292 
4293 	/*
4294 	 * we take any chance we can to service our queues since we cannot
4295 	 * get awoken when the socket is read from :<
4296 	 */
4297 	/*
4298 	 * Now perform the actual SACK handling:
4299 	 * 1) Verify that it is not an old sack; if so, discard.
4300 	 * 2) If there is nothing left in the send queue (cum-ack is equal to
4301 	 *    last acked) then you have a duplicate too; update any rwnd change
4302 	 *    and verify no timers are running, then return.
4303 	 * 3) Process any new consecutive data, i.e. cum-ack moved; process
4304 	 *    these first and note that it moved.
4305 	 * 4) Process any sack blocks. 5) Drop any acked from the queue.
4306 	 * 6) Check for any revoked blocks and mark. 7) Update the cwnd.
4307 	 * 8) Nothing left: sync up flightsizes, stop all timers, and check
4308 	 *    for shutdown_pending state; if so, send off the shutdown. If in
4309 	 *    shutdown recv, send the shutdown-ack, start that timer, return.
4310 	 * 9) Strike any non-acked things and do the FR procedure if needed,
4311 	 *    being sure to set the FR flag. 10) Do pr-sctp procedures.
4312 	 * 11) Apply any FR penalties. 12) Assure we will SACK if in shutdown_recv state.
4313 	 */
4314 	SCTP_TCB_LOCK_ASSERT(stcb);
4315 	sack = &ch->sack;
4316 	/* CMT DAC algo */
4317 	this_sack_lowest_newack = 0;
4318 	j = 0;
4319 	sack_length = (unsigned int)sack_len;
4320 	/* ECN Nonce */
4321 	SCTP_STAT_INCR(sctps_slowpath_sack);
4322 	nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
4323 	cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
4324 #ifdef SCTP_ASOCLOG_OF_TSNS
4325 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4326 	stcb->asoc.cumack_log_at++;
4327 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4328 		stcb->asoc.cumack_log_at = 0;
4329 	}
4330 #endif
4331 	num_seg = ntohs(sack->num_gap_ack_blks);
4332 	a_rwnd = rwnd;
4333 
4334 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4335 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4336 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4337 	}
4338 	/* CMT DAC algo */
4339 	cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
4340 	num_dup = ntohs(sack->num_dup_tsns);
4341 
4342 	old_rwnd = stcb->asoc.peers_rwnd;
4343 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4344 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4345 		    stcb->asoc.overall_error_count,
4346 		    0,
4347 		    SCTP_FROM_SCTP_INDATA,
4348 		    __LINE__);
4349 	}
4350 	stcb->asoc.overall_error_count = 0;
4351 	asoc = &stcb->asoc;
4352 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4353 		sctp_log_sack(asoc->last_acked_seq,
4354 		    cum_ack,
4355 		    0,
4356 		    num_seg,
4357 		    num_dup,
4358 		    SCTP_LOG_NEW_SACK);
4359 	}
4360 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
4361 		int off_to_dup, iii;
4362 		uint32_t *dupdata, dblock;
4363 
4364 		off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk);
4365 		if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= sack_length) {
4366 			dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
4367 			    sizeof(uint32_t), (uint8_t *) & dblock);
4368 			off_to_dup += sizeof(uint32_t);
4369 			if (dupdata) {
4370 				for (iii = 0; iii < num_dup; iii++) {
4371 					sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4372 					dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
4373 					    sizeof(uint32_t), (uint8_t *) & dblock);
4374 					if (dupdata == NULL)
4375 						break;
4376 					off_to_dup += sizeof(uint32_t);
4377 				}
4378 			}
4379 		} else {
4380 			SCTP_PRINTF("Size invalid offset to dups:%d number dups:%d sack_len:%d num gaps:%d\n",
4381 			    off_to_dup, num_dup, sack_length, num_seg);
4382 		}
4383 	}
4384 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4385 		/* reality check */
4386 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4387 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4388 			    sctpchunk_listhead);
4389 			send_s = tp1->rec.data.TSN_seq + 1;
4390 		} else {
4391 			send_s = asoc->sending_seq;
4392 		}
4393 		if (cum_ack == send_s ||
4394 		    compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
4395 #ifndef INVARIANTS
4396 			struct mbuf *oper;
4397 
4398 #endif
4399 #ifdef INVARIANTS
4400 	hopeless_peer:
4401 			panic("Impossible sack 1");
4402 #else
4403 
4404 
4405 			/*
4406 			 * no way, we have not even sent this TSN out yet.
4407 			 * Peer is hopelessly messed up with us.
4408 			 */
4409 	hopeless_peer:
4410 			*abort_now = 1;
4411 			/* XXX */
4412 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4413 			    0, M_DONTWAIT, 1, MT_DATA);
4414 			if (oper) {
4415 				struct sctp_paramhdr *ph;
4416 				uint32_t *ippp;
4417 
4418 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4419 				    sizeof(uint32_t);
4420 				ph = mtod(oper, struct sctp_paramhdr *);
4421 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4422 				ph->param_length = htons(SCTP_BUF_LEN(oper));
4423 				ippp = (uint32_t *) (ph + 1);
4424 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4425 			}
4426 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4427 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4428 			return;
4429 #endif
4430 		}
4431 	}
4432 	/**********************/
4433 	/* 1) check the range */
4434 	/**********************/
4435 	if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
4436 		/* acking something behind */
4437 		return;
4438 	}
4439 	sav_cum_ack = asoc->last_acked_seq;
4440 
4441 	/* update the Rwnd of the peer */
4442 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4443 	    TAILQ_EMPTY(&asoc->send_queue) &&
4444 	    (asoc->stream_queue_cnt == 0)
4445 	    ) {
4446 		/* nothing left on send/sent and strmq */
4447 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4448 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4449 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4450 		}
4451 		asoc->peers_rwnd = a_rwnd;
4452 		if (asoc->sent_queue_retran_cnt) {
4453 			asoc->sent_queue_retran_cnt = 0;
4454 		}
4455 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4456 			/* SWS sender side engages */
4457 			asoc->peers_rwnd = 0;
4458 		}
4459 		/* stop any timers */
4460 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4461 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4462 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4463 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4464 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4465 					SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
4466 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4467 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4468 				}
4469 			}
4470 			net->partial_bytes_acked = 0;
4471 			net->flight_size = 0;
4472 		}
4473 		asoc->total_flight = 0;
4474 		asoc->total_flight_count = 0;
4475 		return;
4476 	}
4477 	/*
4478 	 * We init net_ack and net_ack2 to 0. These are used to track two
4479 	 * things: the total byte count acked is tracked in net_ack, AND
4480 	 * net_ack2 is used to track the total bytes acked that are
4481 	 * unambiguous and were never retransmitted. We track these on a per
4482 	 * destination address basis.
4483 	 */
4484 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4485 		net->prev_cwnd = net->cwnd;
4486 		net->net_ack = 0;
4487 		net->net_ack2 = 0;
4488 
4489 		/*
4490 		 * CMT: Reset CUC and Fast recovery algo variables before
4491 		 * SACK processing
4492 		 */
4493 		net->new_pseudo_cumack = 0;
4494 		net->will_exit_fast_recovery = 0;
4495 	}
4496 	/* process the new consecutive TSN first */
4497 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
4498 	while (tp1) {
4499 		if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
4500 		    MAX_TSN) ||
4501 		    last_tsn == tp1->rec.data.TSN_seq) {
4502 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4503 				/*
4504 				 * ECN Nonce: Add the nonce to the sender's
4505 				 * nonce sum
4506 				 */
4507 				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4508 				accum_moved = 1;
4509 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4510 					/*
4511 					 * If it is less than ACKED, it is
4512 					 * now no-longer in flight. Higher
4513 					 * values may occur during marking
4514 					 */
4515 					if ((tp1->whoTo->dest_state &
4516 					    SCTP_ADDR_UNCONFIRMED) &&
4517 					    (tp1->snd_count < 2)) {
4518 						/*
4519 						 * If there was no retran
4520 						 * and the address is
4521 						 * un-confirmed and we sent
4522 						 * there and are now
4523 						 * sacked, it's confirmed;
4524 						 * mark it so.
4525 						 */
4526 						tp1->whoTo->dest_state &=
4527 						    ~SCTP_ADDR_UNCONFIRMED;
4528 					}
4529 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4530 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4531 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4532 							    tp1->whoTo->flight_size,
4533 							    tp1->book_size,
4534 							    (uintptr_t) tp1->whoTo,
4535 							    tp1->rec.data.TSN_seq);
4536 						}
4537 						sctp_flight_size_decrease(tp1);
4538 						sctp_total_flight_decrease(stcb, tp1);
4539 					}
4540 					tp1->whoTo->net_ack += tp1->send_size;
4541 
4542 					/* CMT SFR and DAC algos */
4543 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4544 					tp1->whoTo->saw_newack = 1;
4545 
4546 					if (tp1->snd_count < 2) {
4547 						/*
4548 						 * True non-retransmitted
4549 						 * chunk
4550 						 */
4551 						tp1->whoTo->net_ack2 +=
4552 						    tp1->send_size;
4553 
4554 						/* update RTO too? */
4555 						if (tp1->do_rtt) {
4556 							tp1->whoTo->RTO =
4557 							    sctp_calculate_rto(stcb,
4558 							    asoc, tp1->whoTo,
4559 							    &tp1->sent_rcv_time,
4560 							    sctp_align_safe_nocopy);
4561 							tp1->do_rtt = 0;
4562 						}
4563 					}
4564 					/*
4565 					 * CMT: CUCv2 algorithm. From the
4566 					 * cumack'd TSNs, for each TSN being
4567 					 * acked for the first time, set the
4568 					 * following variables for the
4569 					 * corresp destination.
4570 					 * new_pseudo_cumack will trigger a
4571 					 * cwnd update.
4572 					 * find_(rtx_)pseudo_cumack will
4573 					 * trigger search for the next
4574 					 * expected (rtx-)pseudo-cumack.
4575 					 */
4576 					tp1->whoTo->new_pseudo_cumack = 1;
4577 					tp1->whoTo->find_pseudo_cumack = 1;
4578 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4579 
4580 
4581 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4582 						sctp_log_sack(asoc->last_acked_seq,
4583 						    cum_ack,
4584 						    tp1->rec.data.TSN_seq,
4585 						    0,
4586 						    0,
4587 						    SCTP_LOG_TSN_ACKED);
4588 					}
4589 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4590 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4591 					}
4592 				}
4593 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4594 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4595 #ifdef SCTP_AUDITING_ENABLED
4596 					sctp_audit_log(0xB3,
4597 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4598 #endif
4599 				}
4600 				if (tp1->rec.data.chunk_was_revoked) {
4601 					/* deflate the cwnd */
4602 					tp1->whoTo->cwnd -= tp1->book_size;
4603 					tp1->rec.data.chunk_was_revoked = 0;
4604 				}
4605 				tp1->sent = SCTP_DATAGRAM_ACKED;
4606 			}
4607 		} else {
4608 			break;
4609 		}
4610 		tp1 = TAILQ_NEXT(tp1, sctp_next);
4611 	}
4612 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4613 	/* always set this up to cum-ack */
4614 	asoc->this_sack_highest_gap = last_tsn;
4615 
4616 	/* Move offset up to point to gaps/dups */
4617 	offset += sizeof(struct sctp_sack_chunk);
4618 	if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_sack_chunk)) > sack_length) {
4619 
4620 		/* skip corrupt segments */
4621 		goto skip_segments;
4622 	}
4623 	if (num_seg > 0) {
4624 
4625 		/*
4626 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4627 		 * to be greater than the cumack. Also reset saw_newack to 0
4628 		 * for all dests.
4629 		 */
4630 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4631 			net->saw_newack = 0;
4632 			net->this_sack_highest_newack = last_tsn;
4633 		}
4634 
4635 		/*
4636 		 * thisSackHighestGap will increase while handling NEW
4637 		 * segments; this_sack_highest_newack will increase while
4638 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4639 		 * used for CMT DAC algo. saw_newack will also change.
4640 		 */
4641 		sctp_handle_segments(m, &offset, stcb, asoc, ch, last_tsn,
4642 		    &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4643 		    num_seg, &ecn_seg_sums);
4644 
4645 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4646 			/*
4647 			 * validate the biggest_tsn_acked in the gap acks if
4648 			 * strict adherence is wanted.
4649 			 */
4650 			if ((biggest_tsn_acked == send_s) ||
4651 			    (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
4652 				/*
4653 				 * peer is either confused or we are under
4654 				 * attack. We must abort.
4655 				 */
4656 				goto hopeless_peer;
4657 			}
4658 		}
4659 	}
4660 skip_segments:
4661 	/*******************************************/
4662 	/* cancel ALL T3-send timer if accum moved */
4663 	/*******************************************/
4664 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
4665 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4666 			if (net->new_pseudo_cumack)
4667 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4668 				    stcb, net,
4669 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4670 
4671 		}
4672 	} else {
4673 		if (accum_moved) {
4674 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4675 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4676 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4677 			}
4678 		}
4679 	}
4680 	/********************************************/
4681 	/* drop the acked chunks from the sendqueue */
4682 	/********************************************/
4683 	asoc->last_acked_seq = cum_ack;
4684 
4685 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
4686 	if (tp1 == NULL)
4687 		goto done_with_it;
4688 	do {
4689 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
4690 		    MAX_TSN)) {
4691 			break;
4692 		}
4693 		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4694 			/* no more sent on list */
4695 			SCTP_PRINTF("Warning, tp1->sent == %d and it's now acked?\n",
4696 			    tp1->sent);
4697 		}
4698 		tp2 = TAILQ_NEXT(tp1, sctp_next);
4699 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4700 		if (tp1->pr_sctp_on) {
4701 			if (asoc->pr_sctp_cnt != 0)
4702 				asoc->pr_sctp_cnt--;
4703 		}
4704 		if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
4705 		    (asoc->total_flight > 0)) {
4706 #ifdef INVARIANTS
4707 			panic("Warning flight size is positive and should be 0");
4708 #else
4709 			SCTP_PRINTF("Warning flight size is incorrect, should be 0 but is %d\n",
4710 			    asoc->total_flight);
4711 #endif
4712 			asoc->total_flight = 0;
4713 		}
4714 		if (tp1->data) {
4715 			/* sa_ignore NO_NULL_CHK */
4716 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4717 			sctp_m_freem(tp1->data);
4718 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4719 				asoc->sent_queue_cnt_removeable--;
4720 			}
4721 		}
4722 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4723 			sctp_log_sack(asoc->last_acked_seq,
4724 			    cum_ack,
4725 			    tp1->rec.data.TSN_seq,
4726 			    0,
4727 			    0,
4728 			    SCTP_LOG_FREE_SENT);
4729 		}
4730 		tp1->data = NULL;
4731 		asoc->sent_queue_cnt--;
4732 		sctp_free_a_chunk(stcb, tp1);
4733 		wake_him++;
4734 		tp1 = tp2;
4735 	} while (tp1 != NULL);
4736 
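	/*
	 * wake_him counts the chunks freed above; freeing them released
	 * send-buffer space, so wake up any blocked sender.
	 */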
4737 done_with_it:
4738 	/* sa_ignore NO_NULL_CHK */
4739 	if ((wake_him) && (stcb->sctp_socket)) {
4740 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4741 		struct socket *so;
4742 
4743 #endif
4744 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4745 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4746 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
4747 		}
4748 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4749 		so = SCTP_INP_SO(stcb->sctp_ep);
4750 		atomic_add_int(&stcb->asoc.refcnt, 1);
4751 		SCTP_TCB_UNLOCK(stcb);
4752 		SCTP_SOCKET_LOCK(so, 1);
4753 		SCTP_TCB_LOCK(stcb);
4754 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4755 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4756 			/* assoc was freed while we were unlocked */
4757 			SCTP_SOCKET_UNLOCK(so, 1);
4758 			return;
4759 		}
4760 #endif
4761 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4762 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4763 		SCTP_SOCKET_UNLOCK(so, 1);
4764 #endif
4765 	} else {
4766 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4767 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
4768 		}
4769 	}
4770 
4771 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4772 		if (compare_with_wrap(asoc->last_acked_seq,
4773 		    asoc->fast_recovery_tsn, MAX_TSN) ||
4774 		    asoc->last_acked_seq == asoc->fast_recovery_tsn) {
4775 			/* Setup so we will exit RFC2582 fast recovery */
4776 			will_exit_fast_recovery = 1;
4777 		}
4778 	}
4779 	/*
4780 	 * Check for revoked fragments:
4781 	 *
4782 	 * - If the previous SACK had no gap reports, nothing can be revoked.
4783 	 * - If it had gap reports and this SACK has them too (num_seg > 0),
4784 	 *   call sctp_check_for_revoked() to see if the peer revoked some.
4785 	 * - Otherwise the peer revoked ALL previously gap-acked fragments,
4786 	 *   since we had some before and now we have none.
4787 	 */
4788 
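	/*
	 * Worked example (illustrative): suppose the previous SACK
	 * gap-acked TSNs 105-107 while the cum-ack sat at 103.  If this
	 * SACK still only cum-acks 103 but reports no gaps at all, the
	 * peer has dropped (revoked) 105-107 from its tables; the code
	 * below moves them back to SCTP_DATAGRAM_SENT and charges them
	 * to the flight again as outstanding data.
	 */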
4789 	if (num_seg)
4790 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4791 	else if (asoc->saw_sack_with_frags) {
4792 		int cnt_revoked = 0;
4793 
4794 		tp1 = TAILQ_FIRST(&asoc->sent_queue);
4795 		if (tp1 != NULL) {
4796 			/* Peer revoked all dg's marked or acked */
4797 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4798 				if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
4799 				    (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
4800 					tp1->sent = SCTP_DATAGRAM_SENT;
4801 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4802 						sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4803 						    tp1->whoTo->flight_size,
4804 						    tp1->book_size,
4805 						    (uintptr_t) tp1->whoTo,
4806 						    tp1->rec.data.TSN_seq);
4807 					}
4808 					sctp_flight_size_increase(tp1);
4809 					sctp_total_flight_increase(stcb, tp1);
4810 					tp1->rec.data.chunk_was_revoked = 1;
4811 					/*
4812 					 * To ensure that this increase in
4813 					 * flightsize, which is artificial,
4814 					 * does not throttle the sender, we
4815 					 * also increase the cwnd
4816 					 * artificially.
4817 					 */
4818 					tp1->whoTo->cwnd += tp1->book_size;
4819 					cnt_revoked++;
4820 				}
4821 			}
4822 			if (cnt_revoked) {
4823 				reneged_all = 1;
4824 			}
4825 		}
4826 		asoc->saw_sack_with_frags = 0;
4827 	}
4828 	if (num_seg)
4829 		asoc->saw_sack_with_frags = 1;
4830 	else
4831 		asoc->saw_sack_with_frags = 0;
4832 
4833 	/* JRS - Use the congestion control given in the CC module */
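	/*
	 * A hedged note on the indirection: cc_functions is a
	 * per-association table of congestion control hooks, so the cwnd
	 * update comes from whichever CC module the association selected
	 * (e.g. the RFC 2581 style default) without this code needing to
	 * know which one is in use.
	 */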
4834 	asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4835 
4836 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4837 		/* nothing left in-flight */
4838 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4839 			/* stop all timers */
4840 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4841 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4842 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4843 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4844 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4845 				}
4846 			}
4847 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4848 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4849 			net->flight_size = 0;
4850 			net->partial_bytes_acked = 0;
4851 		}
4852 		asoc->total_flight = 0;
4853 		asoc->total_flight_count = 0;
4854 	}
4855 	/**********************************/
4856 	/* Now what about shutdown issues */
4857 	/**********************************/
4858 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4859 		/* nothing left on sendqueue.. consider done */
4860 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4861 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4862 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4863 		}
4864 		asoc->peers_rwnd = a_rwnd;
4865 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4866 			/* SWS sender side engages */
4867 			asoc->peers_rwnd = 0;
4868 		}
4869 		/* clean up */
4870 		if ((asoc->stream_queue_cnt == 1) &&
4871 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4872 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4873 		    (asoc->locked_on_sending)
4874 		    ) {
4875 			struct sctp_stream_queue_pending *sp;
4876 
4877 			/*
4878 			 * We may have gotten all the data across, but cannot
4879 			 * write more due to a pending shutdown; we abort,
4880 			 * since the user did not indicate EOR in this case.
4881 			 */
4882 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4883 			    sctp_streamhead);
4884 			if ((sp) && (sp->length == 0)) {
4885 				asoc->locked_on_sending = NULL;
4886 				if (sp->msg_is_complete) {
4887 					asoc->stream_queue_cnt--;
4888 				} else {
4889 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4890 					asoc->stream_queue_cnt--;
4891 				}
4892 			}
4893 		}
4894 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4895 		    (asoc->stream_queue_cnt == 0)) {
4896 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4897 				/* Need to abort here */
4898 				struct mbuf *oper;
4899 
4900 		abort_out_now:
4901 				*abort_now = 1;
4902 				/* XXX */
4903 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4904 				    0, M_DONTWAIT, 1, MT_DATA);
4905 				if (oper) {
4906 					struct sctp_paramhdr *ph;
4907 					uint32_t *ippp;
4908 
4909 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4910 					    sizeof(uint32_t);
4911 					ph = mtod(oper, struct sctp_paramhdr *);
4912 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4913 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4914 					ippp = (uint32_t *) (ph + 1);
4915 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4916 				}
4917 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4918 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4919 				return;
4920 			} else {
4921 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4922 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4923 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4924 				}
4925 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4926 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4927 				sctp_stop_timers_for_shutdown(stcb);
4928 				sctp_send_shutdown(stcb,
4929 				    stcb->asoc.primary_destination);
4930 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4931 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4932 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4933 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4934 			}
4935 			return;
4936 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4937 		    (asoc->stream_queue_cnt == 0)) {
4938 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4939 				goto abort_out_now;
4940 			}
4941 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4942 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4943 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4944 			sctp_send_shutdown_ack(stcb,
4945 			    stcb->asoc.primary_destination);
4946 
4947 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4948 			    stcb->sctp_ep, stcb, asoc->primary_destination);
4949 			return;
4950 		}
4951 	}
4952 	/*
4953 	 * Now here we are going to recycle net_ack for a different use...
4954 	 * HEADS UP.
4955 	 */
4956 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4957 		net->net_ack = 0;
4958 	}
4959 
4960 	/*
4961 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4962 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4963 	 * automatically ensure that.
4964 	 */
4965 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
4966 		this_sack_lowest_newack = cum_ack;
4967 	}
4968 	if (num_seg > 0) {
4969 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4970 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4971 	}
4972 	/*********************************************/
4973 	/* Here we perform PR-SCTP procedures        */
4974 	/* (section 4.2)                             */
4975 	/*********************************************/
4976 	/* C1. update advancedPeerAckPoint */
4977 	if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4978 		asoc->advanced_peer_ack_point = cum_ack;
4979 	}
4980 	/* C2. try to further move advancedPeerAckPoint ahead */
4981 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4982 		struct sctp_tmit_chunk *lchk;
4983 
4984 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4985 		/* C3. See if we need to send a Fwd-TSN */
4986 		if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
4987 		    MAX_TSN)) {
4988 			/*
4989 			 * ISSUE with ECN, see FWD-TSN processing for notes
4990 			 * on issues that will occur when the ECN NONCE
4991 			 * stuff is put into SCTP for cross checking.
4992 			 */
4993 			send_forward_tsn(stcb, asoc);
4994 
4995 			/*
4996 			 * ECN Nonce: Disable Nonce Sum check when FWD TSN
4997 			 * is sent and store resync tsn
4998 			 */
4999 			asoc->nonce_sum_check = 0;
5000 			asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
5001 			if (lchk) {
5002 				/* Assure a timer is up */
5003 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5004 				    stcb->sctp_ep, stcb, lchk->whoTo);
5005 			}
5006 		}
5007 	}
5008 	/* JRS - Use the congestion control given in the CC module */
5009 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5010 
5011 	/******************************************************************
5012 	 *  Here we handle the ECN Nonce checking.
5013 	 *  We check whether the reported nonce sum flag was incorrect or
5014 	 *  whether a resynchronization is needed. If we catch a
5015 	 *  misbehaving receiver, we disable ECN for the association.
5016 	 ******************************************************************/
5017 
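	/*
	 * Worked example (illustrative): each TSN carries a one-bit
	 * nonce, and a SACK echoes the mod-2 sum of the nonces it
	 * covers.  The check below computes the expected flag as
	 * (nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM;
	 * e.g. base = 1 and ecn_seg_sums = 1 expect a flag of 0, so a
	 * SACK reporting 1 is a mismatch and starts the wait-for-ECNE
	 * logic that can end with ECN being disabled for the peer.
	 */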
5018 	if (asoc->ecn_nonce_allowed) {
5019 		if (asoc->nonce_sum_check) {
5020 			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
5021 				if (asoc->nonce_wait_for_ecne == 0) {
5022 					struct sctp_tmit_chunk *lchk;
5023 
5024 					lchk = TAILQ_FIRST(&asoc->send_queue);
5025 					asoc->nonce_wait_for_ecne = 1;
5026 					if (lchk) {
5027 						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
5028 					} else {
5029 						asoc->nonce_wait_tsn = asoc->sending_seq;
5030 					}
5031 				} else {
5032 					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
5033 					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
5034 						/*
5035 						 * Misbehaving peer. We need
5036 						 * to react to this guy
5037 						 */
5038 						asoc->ecn_allowed = 0;
5039 						asoc->ecn_nonce_allowed = 0;
5040 					}
5041 				}
5042 			}
5043 		} else {
5044 			/* See if Resynchronization Possible */
5045 			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
5046 				asoc->nonce_sum_check = 1;
5047 				/*
5048 				 * Now we must calculate the new base. We
5049 				 * know two things: the sum of the nonces
5050 				 * for all segments gap-acked in this SACK
5051 				 * (stored in ecn_seg_sums), and the SACK's
5052 				 * own nonce sum (in nonce_sum_flag). From
5053 				 * these we can back-calculate the new
5054 				 * value of asoc->nonce_sum_expect_base:
5055 				 *
5056 				 * SACK-flag  Seg-Sums  Base
5057 				 *     0         0       0
5058 				 *     0         1       1
5059 				 *     1         0       1
5060 				 *     1         1       0
5061 				 */
5062 				asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
5063 			}
5064 		}
5065 	}
5066 	/* Now are we exiting loss recovery ? */
5067 	if (will_exit_fast_recovery) {
5068 		/* Ok, we must exit fast recovery */
5069 		asoc->fast_retran_loss_recovery = 0;
5070 	}
5071 	if ((asoc->sat_t3_loss_recovery) &&
5072 	    ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
5073 	    MAX_TSN) ||
5074 	    (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
5075 		/* end satellite t3 loss recovery */
5076 		asoc->sat_t3_loss_recovery = 0;
5077 	}
5078 	/*
5079 	 * CMT Fast recovery
5080 	 */
5081 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5082 		if (net->will_exit_fast_recovery) {
5083 			/* Ok, we must exit fast recovery */
5084 			net->fast_retran_loss_recovery = 0;
5085 		}
5086 	}
5087 
5088 	/* Adjust and set the new rwnd value */
5089 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5090 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5091 		    asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5092 	}
5093 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5094 	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5095 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5096 		/* SWS sender side engages */
5097 		asoc->peers_rwnd = 0;
5098 	}
5099 	if (asoc->peers_rwnd > old_rwnd) {
5100 		win_probe_recovery = 1;
5101 	}
5102 	/*
5103 	 * Now we must set up a timer for every destination with
5104 	 * outstanding data.
5105 	 */
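	/*
	 * In outline (mirrors the loop below): every net with
	 * flight_size > 0 gets its T3-rxt timer started and is counted
	 * in j; idle nets get their timers stopped.  If j ends up 0
	 * while the sent queue is non-empty, the flight accounting is
	 * rebuilt once from the sent queue and the pass is retried.
	 */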
5106 	done_once = 0;
5107 again:
5108 	j = 0;
5109 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5110 		if (win_probe_recovery && (net->window_probe)) {
5111 			net->window_probe = 0;
5112 			win_probe_recovered = 1;
5113 			/*-
5114 			 * Find the first chunk that was used for a
5115 			 * window probe and clear the event. Put it
5116 			 * back into the send queue as if it has
5117 			 * not been sent.
5118 			 */
5119 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5120 				if (tp1->window_probe) {
5121 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
5122 					break;
5123 				}
5124 			}
5125 		}
5126 		if (net->flight_size) {
5127 			j++;
5128 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5129 			    stcb->sctp_ep, stcb, net);
5130 		} else {
5131 			if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5132 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5133 				    stcb, net,
5134 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5135 			}
5136 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5137 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5138 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5139 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5140 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
5141 				}
5142 			}
5143 		}
5144 	}
5145 	if ((j == 0) &&
5146 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5147 	    (asoc->sent_queue_retran_cnt == 0) &&
5148 	    (win_probe_recovered == 0) &&
5149 	    (done_once == 0)) {
5150 		/* huh, this should not happen */
5151 		sctp_fs_audit(asoc);
5152 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5153 			net->flight_size = 0;
5154 		}
5155 		asoc->total_flight = 0;
5156 		asoc->total_flight_count = 0;
5157 		asoc->sent_queue_retran_cnt = 0;
5158 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5159 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5160 				sctp_flight_size_increase(tp1);
5161 				sctp_total_flight_increase(stcb, tp1);
5162 			} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5163 				asoc->sent_queue_retran_cnt++;
5164 			}
5165 		}
5166 		done_once = 1;
5167 		goto again;
5168 	}
5169 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5170 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5171 		    a_rwnd,
5172 		    stcb->asoc.peers_rwnd,
5173 		    stcb->asoc.total_flight,
5174 		    stcb->asoc.total_output_queue_size);
5175 	}
5176 }
5177 
5178 void
5179 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5180     struct sctp_nets *netp, int *abort_flag)
5181 {
5182 	/* Copy cum-ack */
5183 	uint32_t cum_ack, a_rwnd;
5184 
5185 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5186 	/* Arrange so a_rwnd does NOT change */
5187 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
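	/*
	 * Worked example (illustrative): with peers_rwnd = 40000 and
	 * total_flight = 6000 we pass a_rwnd = 46000.  The express sack
	 * handler recomputes peers_rwnd as roughly a_rwnd minus what is
	 * still in flight, so acked data shrinks total_flight and
	 * peers_rwnd comes out unchanged, as the comment above intends.
	 */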
5188 
5189 	/* Now call the express sack handling */
5190 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
5191 }
5192 
5193 static void
5194 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5195     struct sctp_stream_in *strmin)
5196 {
5197 	struct sctp_queued_to_read *ctl, *nctl;
5198 	struct sctp_association *asoc;
5199 	int tt;
5200 
5201 	asoc = &stcb->asoc;
5202 	tt = strmin->last_sequence_delivered;
5203 	/*
5204 	 * First deliver anything with a stream sequence number at or
5205 	 * before the newly advanced last_sequence_delivered.
5206 	 */
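	/*
	 * Worked example (illustrative): if a FWD-TSN advanced
	 * last_sequence_delivered to 12 and SSNs 10, 11, 12 and 14 sit
	 * in the queue, this first pass delivers 10-12; the second pass
	 * below then delivers only consecutive successors (13, 14, ...),
	 * so 14 stays queued until 13 arrives.
	 */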
5207 	ctl = TAILQ_FIRST(&strmin->inqueue);
5208 	while (ctl) {
5209 		nctl = TAILQ_NEXT(ctl, next);
5210 		if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
5211 		    (tt == ctl->sinfo_ssn)) {
5212 			/* this is deliverable now */
5213 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5214 			/* subtract pending on streams */
5215 			asoc->size_on_all_streams -= ctl->length;
5216 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5217 			/* deliver it to at least the delivery-q */
5218 			if (stcb->sctp_socket) {
5219 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5220 				    ctl,
5221 				    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
5222 			}
5223 		} else {
5224 			/* no more delivery now. */
5225 			break;
5226 		}
5227 		ctl = nctl;
5228 	}
5229 	/*
5230 	 * Now deliver anything in the queue that has become ready, in
5231 	 * the normal in-sequence way.
5232 	 */
5233 	tt = strmin->last_sequence_delivered + 1;
5234 	ctl = TAILQ_FIRST(&strmin->inqueue);
5235 	while (ctl) {
5236 		nctl = TAILQ_NEXT(ctl, next);
5237 		if (tt == ctl->sinfo_ssn) {
5238 			/* this is deliverable now */
5239 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5240 			/* subtract pending on streams */
5241 			asoc->size_on_all_streams -= ctl->length;
5242 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5243 			/* deliver it to at least the delivery-q */
5244 			strmin->last_sequence_delivered = ctl->sinfo_ssn;
5245 			if (stcb->sctp_socket) {
5246 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5247 				    ctl,
5248 				    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
5249 			}
5250 			tt = strmin->last_sequence_delivered + 1;
5251 		} else {
5252 			break;
5253 		}
5254 		ctl = nctl;
5255 	}
5256 }
5257 
5258 void
5259 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5260     struct sctp_forward_tsn_chunk *fwd, int *abort_flag, struct mbuf *m, int offset)
5261 {
5262 	/*
5263 	 * ISSUES that MUST be fixed for ECN! When we are the sender of the
5264 	 * forward TSN, when the SACK comes back that acknowledges the
5265 	 * FWD-TSN we must reset the NONCE sum to match correctly. This will
5266 	 * get quite tricky since we may have sent more data intervening
5267 	 * and must carefully account for what the SACK says on the nonce
5268 	 * and any gaps that are reported. This work will NOT be done here,
5269 	 * but I note it here since it is really related to PR-SCTP and
5270 	 * FWD-TSN's
5271 	 * FWD-TSNs.
5272 
5273 	/* The pr-sctp fwd tsn */
5274 	/*
5275 	 * here we will perform all the data receiver side steps for
5276 	 * processing FwdTSN, as required by the pr-sctp draft.
5277 	 *
5278 	 * Assume we get FwdTSN(x):
5279 	 * 1) update local cumTSN to x
5280 	 * 2) try to further advance cumTSN to x + others we have
5281 	 * 3) examine and update re-ordering queue on pr-in-streams
5282 	 * 4) clean up re-assembly queue
5283 	 * 5) send a SACK to report where we are
5284 	 */
5285 	struct sctp_association *asoc;
5286 	uint32_t new_cum_tsn, gap;
5287 	unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size;
5288 	struct sctp_stream_in *strm;
5289 	struct sctp_tmit_chunk *chk, *at;
5290 
5291 	cumack_set_flag = 0;
5292 	asoc = &stcb->asoc;
5293 	cnt_gone = 0;
5294 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5295 		SCTPDBG(SCTP_DEBUG_INDATA1,
5296 		    "Bad size too small/big fwd-tsn\n");
5297 		return;
5298 	}
5299 	m_size = (stcb->asoc.mapping_array_size << 3);
5300 	/*************************************************************/
5301 	/* 1. Here we update local cumTSN and shift the bitmap array */
5302 	/*************************************************************/
5303 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5304 
5305 	if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
5306 	    asoc->cumulative_tsn == new_cum_tsn) {
5307 		/* Already got there ... */
5308 		return;
5309 	}
5310 	if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
5311 	    MAX_TSN)) {
5312 		asoc->highest_tsn_inside_map = new_cum_tsn;
5313 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5314 			sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5315 		}
5316 	}
5317 	/*
5318 	 * now we know the new TSN is more advanced, let's find the actual
5319 	 * gap
5320 	 */
5321 	if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn,
5322 	    MAX_TSN)) ||
5323 	    (new_cum_tsn == asoc->mapping_array_base_tsn)) {
5324 		gap = new_cum_tsn - asoc->mapping_array_base_tsn;
5325 	} else {
5326 		/* try to prevent underflow here */
5327 		gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
5328 	}
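	/*
	 * Worked example (illustrative): with mapping_array_base_tsn =
	 * 0xfffffff0 and new_cum_tsn = 0x00000005 the else branch gives
	 * gap = 0x00000005 + (0xffffffff - 0xfffffff0) + 1 = 21, the
	 * same distance unsigned 32-bit subtraction would yield.
	 */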
5329 
5330 	if (gap >= m_size) {
5331 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5332 			sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5333 		}
5334 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5335 			struct mbuf *oper;
5336 
5337 			/*
5338 			 * out of range (of single byte chunks in the rwnd I
5339 			 * give out). This must be an attacker.
5340 			 */
5341 			*abort_flag = 1;
5342 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5343 			    0, M_DONTWAIT, 1, MT_DATA);
5344 			if (oper) {
5345 				struct sctp_paramhdr *ph;
5346 				uint32_t *ippp;
5347 
5348 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5349 				    (sizeof(uint32_t) * 3);
5350 				ph = mtod(oper, struct sctp_paramhdr *);
5351 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5352 				ph->param_length = htons(SCTP_BUF_LEN(oper));
5353 				ippp = (uint32_t *) (ph + 1);
5354 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5355 				ippp++;
5356 				*ippp = asoc->highest_tsn_inside_map;
5357 				ippp++;
5358 				*ippp = new_cum_tsn;
5359 			}
5360 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5361 			sctp_abort_an_association(stcb->sctp_ep, stcb,
5362 			    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
5363 			return;
5364 		}
5365 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5366 slide_out:
5367 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5368 		cumack_set_flag = 1;
5369 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5370 		asoc->cumulative_tsn = asoc->highest_tsn_inside_map = new_cum_tsn;
5371 
5372 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5373 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5374 		}
5375 		asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
5376 	} else {
5377 		SCTP_TCB_LOCK_ASSERT(stcb);
5378 		if ((compare_with_wrap(((uint32_t) asoc->cumulative_tsn + gap), asoc->highest_tsn_inside_map, MAX_TSN)) ||
5379 		    (((uint32_t) asoc->cumulative_tsn + gap) == asoc->highest_tsn_inside_map)) {
5380 			goto slide_out;
5381 		} else {
5382 			for (i = 0; i <= gap; i++) {
5383 				SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
5384 			}
5385 		}
5386 		/*
5387 		 * Now, after marking all, slide the mapping array forward
5388 		 * but do not generate a SACK.
5389 		 */
5390 		sctp_sack_check(stcb, 0, 0, abort_flag);
5391 		if (*abort_flag)
5392 			return;
5393 	}
5394 
5395 	/*************************************************************/
5396 	/* 2. Clear up re-assembly queue                             */
5397 	/*************************************************************/
5398 	/*
5399 	 * First service the partial delivery API if it is active, just in
5400 	 * case we can progress it forward.
5401 	 */
5402 	if (asoc->fragmented_delivery_inprogress) {
5403 		sctp_service_reassembly(stcb, asoc);
5404 	}
5405 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5406 		/* For each one on here see if we need to toss it */
5407 		/*
5408 		 * For now large messages held on the reasmqueue that are
5409 		 * complete will be tossed too. We could in theory do more
5410 		 * work to spin through and stop after dumping one msg aka
5411 		 * seeing the start of a new msg at the head, and call the
5412 		 * delivery function... to see if it can be delivered... But
5413 		 * for now we just dump everything on the queue.
5414 		 */
5415 		chk = TAILQ_FIRST(&asoc->reasmqueue);
5416 		while (chk) {
5417 			at = TAILQ_NEXT(chk, sctp_next);
5418 			if (compare_with_wrap(asoc->cumulative_tsn,
5419 			    chk->rec.data.TSN_seq, MAX_TSN) ||
5420 			    asoc->cumulative_tsn == chk->rec.data.TSN_seq) {
5421 				/* It needs to be tossed */
5422 				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5423 				if (compare_with_wrap(chk->rec.data.TSN_seq,
5424 				    asoc->tsn_last_delivered, MAX_TSN)) {
5425 					asoc->tsn_last_delivered =
5426 					    chk->rec.data.TSN_seq;
5427 					asoc->str_of_pdapi =
5428 					    chk->rec.data.stream_number;
5429 					asoc->ssn_of_pdapi =
5430 					    chk->rec.data.stream_seq;
5431 					asoc->fragment_flags =
5432 					    chk->rec.data.rcv_flags;
5433 				}
5434 				asoc->size_on_reasm_queue -= chk->send_size;
5435 				sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5436 				cnt_gone++;
5437 
5438 				/* Clear up any stream problem */
5439 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
5440 				    SCTP_DATA_UNORDERED &&
5441 				    (compare_with_wrap(chk->rec.data.stream_seq,
5442 				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
5443 				    MAX_SEQ))) {
5444 					/*
5445 					 * We must advance this stream's
5446 					 * sequence number if the chunk
5447 					 * being skipped is not unordered.
5448 					 * There is a chance that if the
5449 					 * peer does not include the last
5450 					 * fragment in its FWD-TSN we WILL
5451 					 * have a problem here, since a
5452 					 * partial chunk would be left in
5453 					 * the queue that may not be
5454 					 * deliverable. Also, if a partial
5455 					 * delivery API has started, the
5456 					 * user may get a partial chunk and
5457 					 * the next read return a new
5458 					 * one... really ugly, but I see no
5459 					 * way around it! Maybe a notify??
5460 					 */
5461 					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
5462 					    chk->rec.data.stream_seq;
5463 				}
5464 				if (chk->data) {
5465 					sctp_m_freem(chk->data);
5466 					chk->data = NULL;
5467 				}
5468 				sctp_free_a_chunk(stcb, chk);
5469 			} else {
5470 				/*
5471 				 * Ok we have gone beyond the end of the
5472 				 * fwd-tsn's mark. Some checks...
5473 				 */
5474 				if ((asoc->fragmented_delivery_inprogress) &&
5475 				    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
5476 					uint32_t str_seq;
5477 
5478 					/*
5479 					 * Special case: the PD-API is up
5480 					 * and what we fwd-tsn'd over
5481 					 * includes the LAST_FRAG, so we
5482 					 * no longer need to run the PD-API.
5483 					 */
5484 					asoc->fragmented_delivery_inprogress = 0;
5485 
5486 					str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi;
5487 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5488 					    stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq, SCTP_SO_NOT_LOCKED);
5489 
5490 				}
5491 				break;
5492 			}
5493 			chk = at;
5494 		}
5495 	}
5496 	if (asoc->fragmented_delivery_inprogress) {
5497 		/*
5498 		 * Ok we removed cnt_gone chunks in the PD-API queue that
5499 		 * were being delivered. So now we must turn off the flag.
5500 		 */
5501 		uint32_t str_seq;
5502 
5503 		str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi;
5504 		sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5505 		    stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq, SCTP_SO_NOT_LOCKED);
5506 		asoc->fragmented_delivery_inprogress = 0;
5507 	}
5508 	/*************************************************************/
5509 	/* 3. Update the PR-stream re-ordering queues                */
5510 	/*************************************************************/
5511 	fwd_sz -= sizeof(*fwd);
5512 	if (m && fwd_sz) {
5513 		/* New method. */
5514 		unsigned int num_str;
5515 		struct sctp_strseq *stseq, strseqbuf;
5516 
5517 		offset += sizeof(*fwd);
5518 
5519 		num_str = fwd_sz / sizeof(struct sctp_strseq);
5520 		for (i = 0; i < num_str; i++) {
5521 			uint16_t st;
5522 
5523 			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5524 			    sizeof(struct sctp_strseq),
5525 			    (uint8_t *) & strseqbuf);
5526 			offset += sizeof(struct sctp_strseq);
5527 			if (stseq == NULL) {
5528 				break;
5529 			}
5530 			/* Convert to host byte order. */
5531 			st = ntohs(stseq->stream);
5532 			stseq->stream = st;
5533 			st = ntohs(stseq->sequence);
5534 			stseq->sequence = st;
5535 			/* now process */
5536 			if (stseq->stream >= asoc->streamincnt) {
5537 				/* screwed up streams, stop!  */
5538 				break;
5539 			}
5540 			strm = &asoc->strmin[stseq->stream];
5541 			if (compare_with_wrap(stseq->sequence,
5542 			    strm->last_sequence_delivered, MAX_SEQ)) {
5543 				/* Update the sequence number */
5544 				strm->last_sequence_delivered =
5545 				    stseq->sequence;
5546 			}
5547 			/* now kick the stream the new way */
5548 			/* sa_ignore NO_NULL_CHK */
5549 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5550 		}
5551 	}
5552 	if (TAILQ_FIRST(&asoc->reasmqueue)) {
5553 		/* now lets kick out and check for more fragmented delivery */
5554 		/* sa_ignore NO_NULL_CHK */
5555 		sctp_deliver_reasm_check(stcb, &stcb->asoc);
5556 	}
5557 }
5558