xref: /freebsd/sys/netinet/sctp_indata.c (revision 101a0f09e8baf8293e1eeb591de18caf15e49e00)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <netinet/sctp_os.h>
36 #include <sys/proc.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_header.h>
40 #include <netinet/sctp_pcb.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_uio.h>
44 #include <netinet/sctp_auth.h>
45 #include <netinet/sctp_timer.h>
46 #include <netinet/sctp_asconf.h>
47 #include <netinet/sctp_indata.h>
48 #include <netinet/sctp_bsd_addr.h>
49 #include <netinet/sctp_input.h>
50 #include <netinet/sctp_crc32.h>
51 #include <netinet/sctp_lock_bsd.h>
52 /*
53  * NOTES: On the outbound side of things I need to check the sack timer to
54  * see if I should generate a sack into the chunk queue (if I have data to
55  * send that is and will be sending it .. for bundling.
56  *
57  * The callback in sctp_usrreq.c will get called when the socket is read from.
58  * This will cause sctp_service_queues() to get called on the top entry in
59  * the list.
60  */
61 static uint32_t
62 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
63     struct sctp_stream_in *strm,
64     struct sctp_tcb *stcb,
65     struct sctp_association *asoc,
66     struct sctp_tmit_chunk *chk, int hold_rlock);
67 
68 void
sctp_set_rwnd(struct sctp_tcb * stcb,struct sctp_association * asoc)69 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
70 {
71 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
72 }
73 
74 /* Calculate what the rwnd would be */
75 uint32_t
sctp_calc_rwnd(struct sctp_tcb * stcb,struct sctp_association * asoc)76 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
77 {
78 	uint32_t calc = 0;
79 
80 	/*
81 	 * This is really set wrong with respect to a 1-2-m socket. Since
82 	 * the sb_cc is the count that everyone as put up. When we re-write
83 	 * sctp_soreceive then we will fix this so that ONLY this
84 	 * associations data is taken into account.
85 	 */
86 	if (stcb->sctp_socket == NULL) {
87 		return (calc);
88 	}
89 
90 	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
91 	    ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
92 	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
93 	    ("size_on_all_streams is %u", asoc->size_on_all_streams));
94 	if (stcb->asoc.sb_cc == 0 &&
95 	    asoc->cnt_on_reasm_queue == 0 &&
96 	    asoc->cnt_on_all_streams == 0) {
97 		/* Full rwnd granted */
98 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
99 		return (calc);
100 	}
101 	/* get actual space */
102 	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
103 	/*
104 	 * take out what has NOT been put on socket queue and we yet hold
105 	 * for putting up.
106 	 */
107 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
108 	    asoc->cnt_on_reasm_queue * MSIZE));
109 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
110 	    asoc->cnt_on_all_streams * MSIZE));
111 	if (calc == 0) {
112 		/* out of space */
113 		return (calc);
114 	}
115 
116 	/* what is the overhead of all these rwnd's */
117 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
118 	/*
119 	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
120 	 * even it is 0. SWS engaged
121 	 */
122 	if (calc < stcb->asoc.my_rwnd_control_len) {
123 		calc = 1;
124 	}
125 	return (calc);
126 }
127 
128 /*
129  * Build out our readq entry based on the incoming packet.
130  */
131 struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb * stcb,struct sctp_nets * net,uint32_t tsn,uint32_t ppid,uint32_t context,uint16_t sid,uint32_t mid,uint8_t flags,struct mbuf * dm)132 sctp_build_readq_entry(struct sctp_tcb *stcb,
133     struct sctp_nets *net,
134     uint32_t tsn, uint32_t ppid,
135     uint32_t context, uint16_t sid,
136     uint32_t mid, uint8_t flags,
137     struct mbuf *dm)
138 {
139 	struct sctp_queued_to_read *read_queue_e = NULL;
140 
141 	sctp_alloc_a_readq(stcb, read_queue_e);
142 	if (read_queue_e == NULL) {
143 		goto failed_build;
144 	}
145 	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
146 	read_queue_e->sinfo_stream = sid;
147 	read_queue_e->sinfo_flags = (flags << 8);
148 	read_queue_e->sinfo_ppid = ppid;
149 	read_queue_e->sinfo_context = context;
150 	read_queue_e->sinfo_tsn = tsn;
151 	read_queue_e->sinfo_cumtsn = tsn;
152 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
153 	read_queue_e->mid = mid;
154 	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
155 	TAILQ_INIT(&read_queue_e->reasm);
156 	read_queue_e->whoFrom = net;
157 	atomic_add_int(&net->ref_count, 1);
158 	read_queue_e->data = dm;
159 	read_queue_e->stcb = stcb;
160 	read_queue_e->port_from = stcb->rport;
161 	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
162 		read_queue_e->do_not_ref_stcb = 1;
163 	}
164 failed_build:
165 	return (read_queue_e);
166 }
167 
168 struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb * inp,struct sctp_sndrcvinfo * sinfo)169 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
170 {
171 	struct sctp_extrcvinfo *seinfo;
172 	struct sctp_sndrcvinfo *outinfo;
173 	struct sctp_rcvinfo *rcvinfo;
174 	struct sctp_nxtinfo *nxtinfo;
175 	struct cmsghdr *cmh;
176 	struct mbuf *ret;
177 	int len;
178 	int use_extended;
179 	int provide_nxt;
180 
181 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
182 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
183 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
184 		/* user does not want any ancillary data */
185 		return (NULL);
186 	}
187 
188 	len = 0;
189 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
190 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
191 	}
192 	seinfo = (struct sctp_extrcvinfo *)sinfo;
193 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
194 	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
195 		provide_nxt = 1;
196 		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
197 	} else {
198 		provide_nxt = 0;
199 	}
200 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
201 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
202 			use_extended = 1;
203 			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
204 		} else {
205 			use_extended = 0;
206 			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
207 		}
208 	} else {
209 		use_extended = 0;
210 	}
211 
212 	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
213 	if (ret == NULL) {
214 		/* No space */
215 		return (ret);
216 	}
217 	SCTP_BUF_LEN(ret) = 0;
218 
219 	/* We need a CMSG header followed by the struct */
220 	cmh = mtod(ret, struct cmsghdr *);
221 	/*
222 	 * Make sure that there is no un-initialized padding between the
223 	 * cmsg header and cmsg data and after the cmsg data.
224 	 */
225 	memset(cmh, 0, len);
226 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
227 		cmh->cmsg_level = IPPROTO_SCTP;
228 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
229 		cmh->cmsg_type = SCTP_RCVINFO;
230 		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
231 		rcvinfo->rcv_sid = sinfo->sinfo_stream;
232 		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
233 		rcvinfo->rcv_flags = sinfo->sinfo_flags;
234 		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
235 		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
236 		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
237 		rcvinfo->rcv_context = sinfo->sinfo_context;
238 		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
239 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
240 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
241 	}
242 	if (provide_nxt) {
243 		cmh->cmsg_level = IPPROTO_SCTP;
244 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
245 		cmh->cmsg_type = SCTP_NXTINFO;
246 		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
247 		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
248 		nxtinfo->nxt_flags = 0;
249 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
250 			nxtinfo->nxt_flags |= SCTP_UNORDERED;
251 		}
252 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
253 			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
254 		}
255 		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
256 			nxtinfo->nxt_flags |= SCTP_COMPLETE;
257 		}
258 		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
259 		nxtinfo->nxt_length = seinfo->serinfo_next_length;
260 		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
261 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
262 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
263 	}
264 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
265 		cmh->cmsg_level = IPPROTO_SCTP;
266 		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
267 		if (use_extended) {
268 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
269 			cmh->cmsg_type = SCTP_EXTRCV;
270 			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
271 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
272 		} else {
273 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
274 			cmh->cmsg_type = SCTP_SNDRCV;
275 			*outinfo = *sinfo;
276 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
277 		}
278 	}
279 	return (ret);
280 }
281 
282 static void
sctp_mark_non_revokable(struct sctp_association * asoc,uint32_t tsn)283 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
284 {
285 	uint32_t gap, i;
286 	int in_r, in_nr;
287 
288 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
289 		return;
290 	}
291 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
292 		/*
293 		 * This tsn is behind the cum ack and thus we don't need to
294 		 * worry about it being moved from one to the other.
295 		 */
296 		return;
297 	}
298 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
299 	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
300 	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
301 	KASSERT(in_r || in_nr, ("%s: Things are really messed up now", __func__));
302 	if (!in_nr) {
303 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
304 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
305 			asoc->highest_tsn_inside_nr_map = tsn;
306 		}
307 	}
308 	if (in_r) {
309 		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
310 		if (tsn == asoc->highest_tsn_inside_map) {
311 			/* We must back down to see what the new highest is. */
312 			for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
313 				SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
314 				if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
315 					asoc->highest_tsn_inside_map = i;
316 					break;
317 				}
318 			}
319 			if (!SCTP_TSN_GE(i, asoc->mapping_array_base_tsn)) {
320 				asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
321 			}
322 		}
323 	}
324 }
325 
326 static int
sctp_place_control_in_stream(struct sctp_stream_in * strm,struct sctp_association * asoc,struct sctp_queued_to_read * control)327 sctp_place_control_in_stream(struct sctp_stream_in *strm,
328     struct sctp_association *asoc,
329     struct sctp_queued_to_read *control)
330 {
331 	struct sctp_queued_to_read *at;
332 	struct sctp_readhead *q;
333 	uint8_t flags, unordered;
334 
335 	flags = (control->sinfo_flags >> 8);
336 	unordered = flags & SCTP_DATA_UNORDERED;
337 	if (unordered) {
338 		q = &strm->uno_inqueue;
339 		if (asoc->idata_supported == 0) {
340 			if (!TAILQ_EMPTY(q)) {
341 				/*
342 				 * Only one stream can be here in old style
343 				 * -- abort
344 				 */
345 				return (-1);
346 			}
347 			TAILQ_INSERT_TAIL(q, control, next_instrm);
348 			control->on_strm_q = SCTP_ON_UNORDERED;
349 			return (0);
350 		}
351 	} else {
352 		q = &strm->inqueue;
353 	}
354 	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
355 		control->end_added = 1;
356 		control->first_frag_seen = 1;
357 		control->last_frag_seen = 1;
358 	}
359 	if (TAILQ_EMPTY(q)) {
360 		/* Empty queue */
361 		TAILQ_INSERT_HEAD(q, control, next_instrm);
362 		if (unordered) {
363 			control->on_strm_q = SCTP_ON_UNORDERED;
364 		} else {
365 			control->on_strm_q = SCTP_ON_ORDERED;
366 		}
367 		return (0);
368 	} else {
369 		TAILQ_FOREACH(at, q, next_instrm) {
370 			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
371 				/*
372 				 * one in queue is bigger than the new one,
373 				 * insert before this one
374 				 */
375 				TAILQ_INSERT_BEFORE(at, control, next_instrm);
376 				if (unordered) {
377 					control->on_strm_q = SCTP_ON_UNORDERED;
378 				} else {
379 					control->on_strm_q = SCTP_ON_ORDERED;
380 				}
381 				break;
382 			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
383 				/*
384 				 * Gak, He sent me a duplicate msg id
385 				 * number?? return -1 to abort.
386 				 */
387 				return (-1);
388 			} else {
389 				if (TAILQ_NEXT(at, next_instrm) == NULL) {
390 					/*
391 					 * We are at the end, insert it
392 					 * after this one
393 					 */
394 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
395 						sctp_log_strm_del(control, at,
396 						    SCTP_STR_LOG_FROM_INSERT_TL);
397 					}
398 					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
399 					if (unordered) {
400 						control->on_strm_q = SCTP_ON_UNORDERED;
401 					} else {
402 						control->on_strm_q = SCTP_ON_ORDERED;
403 					}
404 					break;
405 				}
406 			}
407 		}
408 	}
409 	return (0);
410 }
411 
412 static void
sctp_abort_in_reasm(struct sctp_tcb * stcb,struct sctp_queued_to_read * control,struct sctp_tmit_chunk * chk,int * abort_flag,int opspot)413 sctp_abort_in_reasm(struct sctp_tcb *stcb,
414     struct sctp_queued_to_read *control,
415     struct sctp_tmit_chunk *chk,
416     int *abort_flag, int opspot)
417 {
418 	char msg[SCTP_DIAG_INFO_LEN];
419 	struct mbuf *oper;
420 
421 	if (stcb->asoc.idata_supported) {
422 		SCTP_SNPRINTF(msg, sizeof(msg),
423 		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
424 		    opspot,
425 		    control->fsn_included,
426 		    chk->rec.data.tsn,
427 		    chk->rec.data.sid,
428 		    chk->rec.data.fsn, chk->rec.data.mid);
429 	} else {
430 		SCTP_SNPRINTF(msg, sizeof(msg),
431 		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
432 		    opspot,
433 		    control->fsn_included,
434 		    chk->rec.data.tsn,
435 		    chk->rec.data.sid,
436 		    chk->rec.data.fsn,
437 		    (uint16_t)chk->rec.data.mid);
438 	}
439 	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
440 	sctp_m_freem(chk->data);
441 	chk->data = NULL;
442 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
443 	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
444 	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, false, SCTP_SO_NOT_LOCKED);
445 	*abort_flag = 1;
446 }
447 
448 static void
sctp_clean_up_control(struct sctp_tcb * stcb,struct sctp_queued_to_read * control)449 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
450 {
451 	/*
452 	 * The control could not be placed and must be cleaned.
453 	 */
454 	struct sctp_tmit_chunk *chk, *nchk;
455 
456 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
457 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
458 		if (chk->data)
459 			sctp_m_freem(chk->data);
460 		chk->data = NULL;
461 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
462 	}
463 	sctp_free_remote_addr(control->whoFrom);
464 	if (control->data) {
465 		sctp_m_freem(control->data);
466 		control->data = NULL;
467 	}
468 	sctp_free_a_readq(stcb, control);
469 }
470 
471 /*
472  * Queue the chunk either right into the socket buffer if it is the next one
473  * to go OR put it in the correct place in the delivery queue.  If we do
474  * append to the so_buf, keep doing so until we are out of order as
475  * long as the control's entered are non-fragmented.
476  */
477 static void
sctp_queue_data_to_stream(struct sctp_tcb * stcb,struct sctp_association * asoc,struct sctp_queued_to_read * control,int * abort_flag,int * need_reasm)478 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
479     struct sctp_association *asoc,
480     struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
481 {
482 	/*
483 	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
484 	 * all the data in one stream this could happen quite rapidly. One
485 	 * could use the TSN to keep track of things, but this scheme breaks
486 	 * down in the other type of stream usage that could occur. Send a
487 	 * single msg to stream 0, send 4Billion messages to stream 1, now
488 	 * send a message to stream 0. You have a situation where the TSN
489 	 * has wrapped but not in the stream. Is this worth worrying about
490 	 * or should we just change our queue sort at the bottom to be by
491 	 * TSN.
492 	 *
493 	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
494 	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
495 	 * assignment this could happen... and I don't see how this would be
496 	 * a violation. So for now I am undecided an will leave the sort by
497 	 * SSN alone. Maybe a hybrid approach is the answer
498 	 *
499 	 */
500 	struct sctp_queued_to_read *at;
501 	int queue_needed;
502 	uint32_t nxt_todel;
503 	struct mbuf *op_err;
504 	struct sctp_stream_in *strm;
505 	char msg[SCTP_DIAG_INFO_LEN];
506 
507 	strm = &asoc->strmin[control->sinfo_stream];
508 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
509 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
510 	}
511 	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
512 		/* The incoming sseq is behind where we last delivered? */
513 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
514 		    strm->last_mid_delivered, control->mid);
515 		/*
516 		 * throw it in the stream so it gets cleaned up in
517 		 * association destruction
518 		 */
519 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
520 		if (asoc->idata_supported) {
521 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
522 			    strm->last_mid_delivered, control->sinfo_tsn,
523 			    control->sinfo_stream, control->mid);
524 		} else {
525 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
526 			    (uint16_t)strm->last_mid_delivered,
527 			    control->sinfo_tsn,
528 			    control->sinfo_stream,
529 			    (uint16_t)control->mid);
530 		}
531 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
532 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
533 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
534 		*abort_flag = 1;
535 		return;
536 	}
537 	queue_needed = 1;
538 	asoc->size_on_all_streams += control->length;
539 	sctp_ucount_incr(asoc->cnt_on_all_streams);
540 	nxt_todel = strm->last_mid_delivered + 1;
541 	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
542 		/* can be delivered right away? */
543 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
544 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
545 		}
546 		/* EY it wont be queued if it could be delivered directly */
547 		queue_needed = 0;
548 		if (asoc->size_on_all_streams >= control->length) {
549 			asoc->size_on_all_streams -= control->length;
550 		} else {
551 #ifdef INVARIANTS
552 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
553 #else
554 			asoc->size_on_all_streams = 0;
555 #endif
556 		}
557 		sctp_ucount_decr(asoc->cnt_on_all_streams);
558 		strm->last_mid_delivered++;
559 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
560 		sctp_add_to_readq(stcb->sctp_ep, stcb,
561 		    control,
562 		    &stcb->sctp_socket->so_rcv, 1,
563 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
564 		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
565 			/* all delivered */
566 			nxt_todel = strm->last_mid_delivered + 1;
567 			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
568 			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
569 				if (control->on_strm_q == SCTP_ON_ORDERED) {
570 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
571 					if (asoc->size_on_all_streams >= control->length) {
572 						asoc->size_on_all_streams -= control->length;
573 					} else {
574 #ifdef INVARIANTS
575 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
576 #else
577 						asoc->size_on_all_streams = 0;
578 #endif
579 					}
580 					sctp_ucount_decr(asoc->cnt_on_all_streams);
581 #ifdef INVARIANTS
582 				} else {
583 					panic("Huh control: %p is on_strm_q: %d",
584 					    control, control->on_strm_q);
585 #endif
586 				}
587 				control->on_strm_q = 0;
588 				strm->last_mid_delivered++;
589 				/*
590 				 * We ignore the return of deliver_data here
591 				 * since we always can hold the chunk on the
592 				 * d-queue. And we have a finite number that
593 				 * can be delivered from the strq.
594 				 */
595 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
596 					sctp_log_strm_del(control, NULL,
597 					    SCTP_STR_LOG_FROM_IMMED_DEL);
598 				}
599 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
600 				sctp_add_to_readq(stcb->sctp_ep, stcb,
601 				    control,
602 				    &stcb->sctp_socket->so_rcv, 1,
603 				    SCTP_READ_LOCK_NOT_HELD,
604 				    SCTP_SO_LOCKED);
605 				continue;
606 			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
607 				*need_reasm = 1;
608 			}
609 			break;
610 		}
611 	}
612 	if (queue_needed) {
613 		/*
614 		 * Ok, we did not deliver this guy, find the correct place
615 		 * to put it on the queue.
616 		 */
617 		if (sctp_place_control_in_stream(strm, asoc, control)) {
618 			SCTP_SNPRINTF(msg, sizeof(msg),
619 			    "Queue to str MID: %u duplicate", control->mid);
620 			sctp_clean_up_control(stcb, control);
621 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
622 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
623 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
624 			*abort_flag = 1;
625 		}
626 	}
627 }
628 
629 static void
sctp_setup_tail_pointer(struct sctp_queued_to_read * control)630 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
631 {
632 	struct mbuf *m, *prev = NULL;
633 	struct sctp_tcb *stcb;
634 
635 	stcb = control->stcb;
636 	control->held_length = 0;
637 	control->length = 0;
638 	m = control->data;
639 	while (m) {
640 		if (SCTP_BUF_LEN(m) == 0) {
641 			/* Skip mbufs with NO length */
642 			if (prev == NULL) {
643 				/* First one */
644 				control->data = sctp_m_free(m);
645 				m = control->data;
646 			} else {
647 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
648 				m = SCTP_BUF_NEXT(prev);
649 			}
650 			if (m == NULL) {
651 				control->tail_mbuf = prev;
652 			}
653 			continue;
654 		}
655 		prev = m;
656 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
657 		if (control->on_read_q) {
658 			/*
659 			 * On read queue so we must increment the SB stuff,
660 			 * we assume caller has done any locks of SB.
661 			 */
662 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
663 		}
664 		m = SCTP_BUF_NEXT(m);
665 	}
666 	if (prev) {
667 		control->tail_mbuf = prev;
668 	}
669 }
670 
671 static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read * control,struct mbuf * m,uint32_t * added)672 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
673 {
674 	struct mbuf *prev = NULL;
675 	struct sctp_tcb *stcb;
676 
677 	stcb = control->stcb;
678 	if (stcb == NULL) {
679 #ifdef INVARIANTS
680 		panic("Control broken");
681 #else
682 		return;
683 #endif
684 	}
685 	if (control->tail_mbuf == NULL) {
686 		/* TSNH */
687 		sctp_m_freem(control->data);
688 		control->data = m;
689 		sctp_setup_tail_pointer(control);
690 		return;
691 	}
692 	control->tail_mbuf->m_next = m;
693 	while (m) {
694 		if (SCTP_BUF_LEN(m) == 0) {
695 			/* Skip mbufs with NO length */
696 			if (prev == NULL) {
697 				/* First one */
698 				control->tail_mbuf->m_next = sctp_m_free(m);
699 				m = control->tail_mbuf->m_next;
700 			} else {
701 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
702 				m = SCTP_BUF_NEXT(prev);
703 			}
704 			if (m == NULL) {
705 				control->tail_mbuf = prev;
706 			}
707 			continue;
708 		}
709 		prev = m;
710 		if (control->on_read_q) {
711 			/*
712 			 * On read queue so we must increment the SB stuff,
713 			 * we assume caller has done any locks of SB.
714 			 */
715 			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
716 		}
717 		*added += SCTP_BUF_LEN(m);
718 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
719 		m = SCTP_BUF_NEXT(m);
720 	}
721 	if (prev) {
722 		control->tail_mbuf = prev;
723 	}
724 }
725 
726 static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read * nc,struct sctp_queued_to_read * control)727 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
728 {
729 	memset(nc, 0, sizeof(struct sctp_queued_to_read));
730 	nc->sinfo_stream = control->sinfo_stream;
731 	nc->mid = control->mid;
732 	TAILQ_INIT(&nc->reasm);
733 	nc->top_fsn = control->top_fsn;
734 	nc->mid = control->mid;
735 	nc->sinfo_flags = control->sinfo_flags;
736 	nc->sinfo_ppid = control->sinfo_ppid;
737 	nc->sinfo_context = control->sinfo_context;
738 	nc->fsn_included = 0xffffffff;
739 	nc->sinfo_tsn = control->sinfo_tsn;
740 	nc->sinfo_cumtsn = control->sinfo_cumtsn;
741 	nc->sinfo_assoc_id = control->sinfo_assoc_id;
742 	nc->whoFrom = control->whoFrom;
743 	atomic_add_int(&nc->whoFrom->ref_count, 1);
744 	nc->stcb = control->stcb;
745 	nc->port_from = control->port_from;
746 	nc->do_not_ref_stcb = control->do_not_ref_stcb;
747 }
748 
749 static int
sctp_handle_old_unordered_data(struct sctp_tcb * stcb,struct sctp_association * asoc,struct sctp_stream_in * strm,struct sctp_queued_to_read * control,uint32_t pd_point,int inp_read_lock_held)750 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
751     struct sctp_association *asoc,
752     struct sctp_stream_in *strm,
753     struct sctp_queued_to_read *control,
754     uint32_t pd_point,
755     int inp_read_lock_held)
756 {
757 	/*
758 	 * Special handling for the old un-ordered data chunk. All the
759 	 * chunks/TSN's go to mid 0. So we have to do the old style watching
760 	 * to see if we have it all. If you return one, no other control
761 	 * entries on the un-ordered queue will be looked at. In theory
762 	 * there should be no others entries in reality, unless the guy is
763 	 * sending both unordered NDATA and unordered DATA...
764 	 */
765 	struct sctp_tmit_chunk *chk, *lchk, *tchk;
766 	uint32_t fsn;
767 	struct sctp_queued_to_read *nc;
768 	int cnt_added;
769 
770 	if (control->first_frag_seen == 0) {
771 		/* Nothing we can do, we have not seen the first piece yet */
772 		return (1);
773 	}
774 	/* Collapse any we can */
775 	cnt_added = 0;
776 restart:
777 	fsn = control->fsn_included + 1;
778 	/* Now what can we add? */
779 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
780 		if (chk->rec.data.fsn == fsn) {
781 			/* Ok lets add it */
782 			sctp_alloc_a_readq(stcb, nc);
783 			if (nc == NULL) {
784 				break;
785 			}
786 			memset(nc, 0, sizeof(struct sctp_queued_to_read));
787 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
788 			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, inp_read_lock_held);
789 			fsn++;
790 			cnt_added++;
791 			chk = NULL;
792 			if (control->end_added) {
793 				/* We are done */
794 				if (!TAILQ_EMPTY(&control->reasm)) {
795 					/*
796 					 * Ok we have to move anything left
797 					 * on the control queue to a new
798 					 * control.
799 					 */
800 					sctp_build_readq_entry_from_ctl(nc, control);
801 					tchk = TAILQ_FIRST(&control->reasm);
802 					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
803 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
804 						if (asoc->size_on_reasm_queue >= tchk->send_size) {
805 							asoc->size_on_reasm_queue -= tchk->send_size;
806 						} else {
807 #ifdef INVARIANTS
808 							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
809 #else
810 							asoc->size_on_reasm_queue = 0;
811 #endif
812 						}
813 						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
814 						nc->first_frag_seen = 1;
815 						nc->fsn_included = tchk->rec.data.fsn;
816 						nc->data = tchk->data;
817 						nc->sinfo_ppid = tchk->rec.data.ppid;
818 						nc->sinfo_tsn = tchk->rec.data.tsn;
819 						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
820 						tchk->data = NULL;
821 						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
822 						sctp_setup_tail_pointer(nc);
823 						tchk = TAILQ_FIRST(&control->reasm);
824 					}
825 					/* Spin the rest onto the queue */
826 					while (tchk) {
827 						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
828 						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
829 						tchk = TAILQ_FIRST(&control->reasm);
830 					}
831 					/*
832 					 * Now lets add it to the queue
833 					 * after removing control
834 					 */
835 					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
836 					nc->on_strm_q = SCTP_ON_UNORDERED;
837 					if (control->on_strm_q) {
838 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
839 						control->on_strm_q = 0;
840 					}
841 				}
842 				if (control->pdapi_started) {
843 					strm->pd_api_started = 0;
844 					control->pdapi_started = 0;
845 				}
846 				if (control->on_strm_q) {
847 					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
848 					control->on_strm_q = 0;
849 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
850 				}
851 				if (control->on_read_q == 0) {
852 					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
853 					    &stcb->sctp_socket->so_rcv, control->end_added,
854 					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
855 				}
856 				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
857 				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
858 					/*
859 					 * Switch to the new guy and
860 					 * continue
861 					 */
862 					control = nc;
863 					goto restart;
864 				} else {
865 					if (nc->on_strm_q == 0) {
866 						sctp_free_a_readq(stcb, nc);
867 					}
868 				}
869 				return (1);
870 			} else {
871 				sctp_free_a_readq(stcb, nc);
872 			}
873 		} else {
874 			/* Can't add more */
875 			break;
876 		}
877 	}
878 	if (cnt_added && strm->pd_api_started) {
879 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
880 	}
881 	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
882 		strm->pd_api_started = 1;
883 		control->pdapi_started = 1;
884 		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
885 		    &stcb->sctp_socket->so_rcv, control->end_added,
886 		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
887 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
888 		return (0);
889 	} else {
890 		return (1);
891 	}
892 }
893 
894 static void
sctp_inject_old_unordered_data(struct sctp_tcb * stcb,struct sctp_association * asoc,struct sctp_queued_to_read * control,struct sctp_tmit_chunk * chk,int * abort_flag)895 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
896     struct sctp_association *asoc,
897     struct sctp_queued_to_read *control,
898     struct sctp_tmit_chunk *chk,
899     int *abort_flag)
900 {
901 	struct sctp_tmit_chunk *at;
902 	int inserted;
903 
904 	/*
905 	 * Here we need to place the chunk into the control structure sorted
906 	 * in the correct order.
907 	 */
908 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
909 		/* Its the very first one. */
910 		SCTPDBG(SCTP_DEBUG_XXX,
911 		    "chunk is a first fsn: %u becomes fsn_included\n",
912 		    chk->rec.data.fsn);
913 		at = TAILQ_FIRST(&control->reasm);
914 		if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
915 			/*
916 			 * The first chunk in the reassembly is a smaller
917 			 * TSN than this one, even though this has a first,
918 			 * it must be from a subsequent msg.
919 			 */
920 			goto place_chunk;
921 		}
922 		if (control->first_frag_seen) {
923 			/*
924 			 * In old un-ordered we can reassembly on one
925 			 * control multiple messages. As long as the next
926 			 * FIRST is greater then the old first (TSN i.e. FSN
927 			 * wise)
928 			 */
929 			struct mbuf *tdata;
930 			uint32_t tmp;
931 
932 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
933 				/*
934 				 * Easy way the start of a new guy beyond
935 				 * the lowest
936 				 */
937 				goto place_chunk;
938 			}
939 			if ((chk->rec.data.fsn == control->fsn_included) ||
940 			    (control->pdapi_started)) {
941 				/*
942 				 * Ok this should not happen, if it does we
943 				 * started the pd-api on the higher TSN
944 				 * (since the equals part is a TSN failure
945 				 * it must be that).
946 				 *
947 				 * We are completely hosed in that case
948 				 * since I have no way to recover. This
949 				 * really will only happen if we can get
950 				 * more TSN's higher before the
951 				 * pd-api-point.
952 				 */
953 				sctp_abort_in_reasm(stcb, control, chk,
954 				    abort_flag,
955 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
956 
957 				return;
958 			}
959 			/*
960 			 * Ok we have two firsts and the one we just got is
961 			 * smaller than the one we previously placed.. yuck!
962 			 * We must swap them out.
963 			 */
964 			/* swap the mbufs */
965 			tdata = control->data;
966 			control->data = chk->data;
967 			chk->data = tdata;
968 			/* Save the lengths */
969 			chk->send_size = control->length;
970 			/* Recompute length of control and tail pointer */
971 			sctp_setup_tail_pointer(control);
972 			/* Fix the FSN included */
973 			tmp = control->fsn_included;
974 			control->fsn_included = chk->rec.data.fsn;
975 			chk->rec.data.fsn = tmp;
976 			/* Fix the TSN included */
977 			tmp = control->sinfo_tsn;
978 			control->sinfo_tsn = chk->rec.data.tsn;
979 			chk->rec.data.tsn = tmp;
980 			/* Fix the PPID included */
981 			tmp = control->sinfo_ppid;
982 			control->sinfo_ppid = chk->rec.data.ppid;
983 			chk->rec.data.ppid = tmp;
984 			/* Fix tail pointer */
985 			goto place_chunk;
986 		}
987 		control->first_frag_seen = 1;
988 		control->fsn_included = chk->rec.data.fsn;
989 		control->top_fsn = chk->rec.data.fsn;
990 		control->sinfo_tsn = chk->rec.data.tsn;
991 		control->sinfo_ppid = chk->rec.data.ppid;
992 		control->data = chk->data;
993 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
994 		chk->data = NULL;
995 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
996 		sctp_setup_tail_pointer(control);
997 		return;
998 	}
999 place_chunk:
1000 	inserted = 0;
1001 	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1002 		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1003 			/*
1004 			 * This one in queue is bigger than the new one,
1005 			 * insert the new one before at.
1006 			 */
1007 			asoc->size_on_reasm_queue += chk->send_size;
1008 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1009 			inserted = 1;
1010 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1011 			break;
1012 		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1013 			/*
1014 			 * They sent a duplicate fsn number. This really
1015 			 * should not happen since the FSN is a TSN and it
1016 			 * should have been dropped earlier.
1017 			 */
1018 			sctp_abort_in_reasm(stcb, control, chk,
1019 			    abort_flag,
1020 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1021 			return;
1022 		}
1023 	}
1024 	if (inserted == 0) {
1025 		/* Its at the end */
1026 		asoc->size_on_reasm_queue += chk->send_size;
1027 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1028 		control->top_fsn = chk->rec.data.fsn;
1029 		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1030 	}
1031 }
1032 
1033 static int
sctp_deliver_reasm_check(struct sctp_tcb * stcb,struct sctp_association * asoc,struct sctp_stream_in * strm,int inp_read_lock_held)1034 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1035     struct sctp_stream_in *strm, int inp_read_lock_held)
1036 {
1037 	/*
1038 	 * Given a stream, strm, see if any of the SSN's on it that are
1039 	 * fragmented are ready to deliver. If so go ahead and place them on
1040 	 * the read queue. In so placing if we have hit the end, then we
1041 	 * need to remove them from the stream's queue.
1042 	 */
1043 	struct sctp_queued_to_read *control, *nctl = NULL;
1044 	uint32_t next_to_del;
1045 	uint32_t pd_point;
1046 	int ret = 0;
1047 
1048 	if (stcb->sctp_socket) {
1049 		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1050 		    stcb->sctp_ep->partial_delivery_point);
1051 	} else {
1052 		pd_point = stcb->sctp_ep->partial_delivery_point;
1053 	}
1054 	control = TAILQ_FIRST(&strm->uno_inqueue);
1055 
1056 	if ((control != NULL) &&
1057 	    (asoc->idata_supported == 0)) {
1058 		/* Special handling needed for "old" data format */
1059 		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1060 			goto done_un;
1061 		}
1062 	}
1063 	if (strm->pd_api_started) {
1064 		/* Can't add more */
1065 		return (0);
1066 	}
1067 	while (control) {
1068 		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1069 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1070 		nctl = TAILQ_NEXT(control, next_instrm);
1071 		if (control->end_added) {
1072 			/* We just put the last bit on */
1073 			if (control->on_strm_q) {
1074 #ifdef INVARIANTS
1075 				if (control->on_strm_q != SCTP_ON_UNORDERED) {
1076 					panic("Huh control: %p on_q: %d -- not unordered?",
1077 					    control, control->on_strm_q);
1078 				}
1079 #endif
1080 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1081 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1082 				if (asoc->size_on_all_streams >= control->length) {
1083 					asoc->size_on_all_streams -= control->length;
1084 				} else {
1085 #ifdef INVARIANTS
1086 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1087 #else
1088 					asoc->size_on_all_streams = 0;
1089 #endif
1090 				}
1091 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1092 				control->on_strm_q = 0;
1093 			}
1094 			if (control->on_read_q == 0) {
1095 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1096 				    control,
1097 				    &stcb->sctp_socket->so_rcv, control->end_added,
1098 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1099 			}
1100 		} else {
1101 			/* Can we do a PD-API for this un-ordered guy? */
1102 			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1103 				strm->pd_api_started = 1;
1104 				control->pdapi_started = 1;
1105 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1106 				    control,
1107 				    &stcb->sctp_socket->so_rcv, control->end_added,
1108 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1109 
1110 				break;
1111 			}
1112 		}
1113 		control = nctl;
1114 	}
1115 done_un:
1116 	control = TAILQ_FIRST(&strm->inqueue);
1117 	if (strm->pd_api_started) {
1118 		/* Can't add more */
1119 		return (0);
1120 	}
1121 	if (control == NULL) {
1122 		return (ret);
1123 	}
1124 	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1125 		/*
1126 		 * Ok the guy at the top was being partially delivered
1127 		 * completed, so we remove it. Note the pd_api flag was
1128 		 * taken off when the chunk was merged on in
1129 		 * sctp_queue_data_for_reasm below.
1130 		 */
1131 		nctl = TAILQ_NEXT(control, next_instrm);
1132 		SCTPDBG(SCTP_DEBUG_XXX,
1133 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1134 		    control, control->end_added, control->mid,
1135 		    control->top_fsn, control->fsn_included,
1136 		    strm->last_mid_delivered);
1137 		if (control->end_added) {
1138 			if (control->on_strm_q) {
1139 #ifdef INVARIANTS
1140 				if (control->on_strm_q != SCTP_ON_ORDERED) {
1141 					panic("Huh control: %p on_q: %d -- not ordered?",
1142 					    control, control->on_strm_q);
1143 				}
1144 #endif
1145 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1146 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1147 				if (asoc->size_on_all_streams >= control->length) {
1148 					asoc->size_on_all_streams -= control->length;
1149 				} else {
1150 #ifdef INVARIANTS
1151 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1152 #else
1153 					asoc->size_on_all_streams = 0;
1154 #endif
1155 				}
1156 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1157 				control->on_strm_q = 0;
1158 			}
1159 			if (strm->pd_api_started && control->pdapi_started) {
1160 				control->pdapi_started = 0;
1161 				strm->pd_api_started = 0;
1162 			}
1163 			if (control->on_read_q == 0) {
1164 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1165 				    control,
1166 				    &stcb->sctp_socket->so_rcv, control->end_added,
1167 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1168 			}
1169 			control = nctl;
1170 		}
1171 	}
1172 	if (strm->pd_api_started) {
1173 		/*
1174 		 * Can't add more must have gotten an un-ordered above being
1175 		 * partially delivered.
1176 		 */
1177 		return (0);
1178 	}
1179 deliver_more:
1180 	next_to_del = strm->last_mid_delivered + 1;
1181 	if (control) {
1182 		SCTPDBG(SCTP_DEBUG_XXX,
1183 		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1184 		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1185 		    next_to_del);
1186 		nctl = TAILQ_NEXT(control, next_instrm);
1187 		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1188 		    (control->first_frag_seen)) {
1189 			int done;
1190 
1191 			/* Ok we can deliver it onto the stream. */
1192 			if (control->end_added) {
1193 				/* We are done with it afterwards */
1194 				if (control->on_strm_q) {
1195 #ifdef INVARIANTS
1196 					if (control->on_strm_q != SCTP_ON_ORDERED) {
1197 						panic("Huh control: %p on_q: %d -- not ordered?",
1198 						    control, control->on_strm_q);
1199 					}
1200 #endif
1201 					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1202 					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1203 					if (asoc->size_on_all_streams >= control->length) {
1204 						asoc->size_on_all_streams -= control->length;
1205 					} else {
1206 #ifdef INVARIANTS
1207 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1208 #else
1209 						asoc->size_on_all_streams = 0;
1210 #endif
1211 					}
1212 					sctp_ucount_decr(asoc->cnt_on_all_streams);
1213 					control->on_strm_q = 0;
1214 				}
1215 				ret++;
1216 			}
1217 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1218 				/*
1219 				 * A singleton now slipping through - mark
1220 				 * it non-revokable too
1221 				 */
1222 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1223 			} else if (control->end_added == 0) {
1224 				/*
1225 				 * Check if we can defer adding until its
1226 				 * all there
1227 				 */
1228 				if ((control->length < pd_point) || (strm->pd_api_started)) {
1229 					/*
1230 					 * Don't need it or cannot add more
1231 					 * (one being delivered that way)
1232 					 */
1233 					goto out;
1234 				}
1235 			}
1236 			done = (control->end_added) && (control->last_frag_seen);
1237 			if (control->on_read_q == 0) {
1238 				if (!done) {
1239 					if (asoc->size_on_all_streams >= control->length) {
1240 						asoc->size_on_all_streams -= control->length;
1241 					} else {
1242 #ifdef INVARIANTS
1243 						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1244 #else
1245 						asoc->size_on_all_streams = 0;
1246 #endif
1247 					}
1248 					strm->pd_api_started = 1;
1249 					control->pdapi_started = 1;
1250 				}
1251 				sctp_add_to_readq(stcb->sctp_ep, stcb,
1252 				    control,
1253 				    &stcb->sctp_socket->so_rcv, control->end_added,
1254 				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1255 			}
1256 			strm->last_mid_delivered = next_to_del;
1257 			if (done) {
1258 				control = nctl;
1259 				goto deliver_more;
1260 			}
1261 		}
1262 	}
1263 out:
1264 	return (ret);
1265 }
1266 
1267 uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read * control,struct sctp_stream_in * strm,struct sctp_tcb * stcb,struct sctp_association * asoc,struct sctp_tmit_chunk * chk,int hold_rlock)1268 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1269     struct sctp_stream_in *strm,
1270     struct sctp_tcb *stcb, struct sctp_association *asoc,
1271     struct sctp_tmit_chunk *chk, int hold_rlock)
1272 {
1273 	/*
1274 	 * Given a control and a chunk, merge the data from the chk onto the
1275 	 * control and free up the chunk resources.
1276 	 */
1277 	uint32_t added = 0;
1278 	bool i_locked = false;
1279 
1280 	if (control->on_read_q) {
1281 		if (hold_rlock == 0) {
1282 			/* Its being pd-api'd so we must do some locks. */
1283 			SCTP_INP_READ_LOCK(stcb->sctp_ep);
1284 			i_locked = true;
1285 		}
1286 		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
1287 			goto out;
1288 		}
1289 	}
1290 	if (control->data == NULL) {
1291 		control->data = chk->data;
1292 		sctp_setup_tail_pointer(control);
1293 	} else {
1294 		sctp_add_to_tail_pointer(control, chk->data, &added);
1295 	}
1296 	control->fsn_included = chk->rec.data.fsn;
1297 	asoc->size_on_reasm_queue -= chk->send_size;
1298 	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1299 	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1300 	chk->data = NULL;
1301 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1302 		control->first_frag_seen = 1;
1303 		control->sinfo_tsn = chk->rec.data.tsn;
1304 		control->sinfo_ppid = chk->rec.data.ppid;
1305 	}
1306 	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1307 		/* Its complete */
1308 		if ((control->on_strm_q) && (control->on_read_q)) {
1309 			if (control->pdapi_started) {
1310 				control->pdapi_started = 0;
1311 				strm->pd_api_started = 0;
1312 			}
1313 			if (control->on_strm_q == SCTP_ON_UNORDERED) {
1314 				/* Unordered */
1315 				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1316 				control->on_strm_q = 0;
1317 			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
1318 				/* Ordered */
1319 				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1320 				/*
1321 				 * Don't need to decrement
1322 				 * size_on_all_streams, since control is on
1323 				 * the read queue.
1324 				 */
1325 				sctp_ucount_decr(asoc->cnt_on_all_streams);
1326 				control->on_strm_q = 0;
1327 #ifdef INVARIANTS
1328 			} else if (control->on_strm_q) {
1329 				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1330 				    control->on_strm_q);
1331 #endif
1332 			}
1333 		}
1334 		control->end_added = 1;
1335 		control->last_frag_seen = 1;
1336 	}
1337 out:
1338 	if (i_locked) {
1339 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1340 	}
1341 	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1342 	return (added);
1343 }
1344 
1345 /*
1346  * Dump onto the re-assembly queue, in its proper place. After dumping on the
1347  * queue, see if anything can be delivered. If so pull it off (or as much as
1348  * we can. If we run out of space then we must dump what we can and set the
1349  * appropriate flag to say we queued what we could.
1350  */
1351 static void
sctp_queue_data_for_reasm(struct sctp_tcb * stcb,struct sctp_association * asoc,struct sctp_queued_to_read * control,struct sctp_tmit_chunk * chk,int created_control,int * abort_flag,uint32_t tsn)1352 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1353     struct sctp_queued_to_read *control,
1354     struct sctp_tmit_chunk *chk,
1355     int created_control,
1356     int *abort_flag, uint32_t tsn)
1357 {
1358 	uint32_t next_fsn;
1359 	struct sctp_tmit_chunk *at, *nat;
1360 	struct sctp_stream_in *strm;
1361 	int do_wakeup, unordered;
1362 	uint32_t lenadded;
1363 
1364 	strm = &asoc->strmin[control->sinfo_stream];
1365 	/*
1366 	 * For old un-ordered data chunks.
1367 	 */
1368 	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1369 		unordered = 1;
1370 	} else {
1371 		unordered = 0;
1372 	}
1373 	/* Must be added to the stream-in queue */
1374 	if (created_control) {
1375 		if ((unordered == 0) || (asoc->idata_supported)) {
1376 			sctp_ucount_incr(asoc->cnt_on_all_streams);
1377 		}
1378 		if (sctp_place_control_in_stream(strm, asoc, control)) {
1379 			/* Duplicate SSN? */
1380 			sctp_abort_in_reasm(stcb, control, chk,
1381 			    abort_flag,
1382 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1383 			sctp_clean_up_control(stcb, control);
1384 			return;
1385 		}
1386 		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1387 			/*
1388 			 * Ok we created this control and now lets validate
1389 			 * that its legal i.e. there is a B bit set, if not
1390 			 * and we have up to the cum-ack then its invalid.
1391 			 */
1392 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1393 				sctp_abort_in_reasm(stcb, control, chk,
1394 				    abort_flag,
1395 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1396 				return;
1397 			}
1398 		}
1399 	}
1400 	if ((asoc->idata_supported == 0) && (unordered == 1)) {
1401 		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1402 		return;
1403 	}
1404 	/*
1405 	 * Ok we must queue the chunk into the reasembly portion: o if its
1406 	 * the first it goes to the control mbuf. o if its not first but the
1407 	 * next in sequence it goes to the control, and each succeeding one
1408 	 * in order also goes. o if its not in order we place it on the list
1409 	 * in its place.
1410 	 */
1411 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1412 		/* Its the very first one. */
1413 		SCTPDBG(SCTP_DEBUG_XXX,
1414 		    "chunk is a first fsn: %u becomes fsn_included\n",
1415 		    chk->rec.data.fsn);
1416 		if (control->first_frag_seen) {
1417 			/*
1418 			 * Error on senders part, they either sent us two
1419 			 * data chunks with FIRST, or they sent two
1420 			 * un-ordered chunks that were fragmented at the
1421 			 * same time in the same stream.
1422 			 */
1423 			sctp_abort_in_reasm(stcb, control, chk,
1424 			    abort_flag,
1425 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1426 			return;
1427 		}
1428 		control->first_frag_seen = 1;
1429 		control->sinfo_ppid = chk->rec.data.ppid;
1430 		control->sinfo_tsn = chk->rec.data.tsn;
1431 		control->fsn_included = chk->rec.data.fsn;
1432 		control->data = chk->data;
1433 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1434 		chk->data = NULL;
1435 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1436 		sctp_setup_tail_pointer(control);
1437 		asoc->size_on_all_streams += control->length;
1438 	} else {
1439 		/* Place the chunk in our list */
1440 		int inserted = 0;
1441 
1442 		if (control->last_frag_seen == 0) {
1443 			/* Still willing to raise highest FSN seen */
1444 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1445 				SCTPDBG(SCTP_DEBUG_XXX,
1446 				    "We have a new top_fsn: %u\n",
1447 				    chk->rec.data.fsn);
1448 				control->top_fsn = chk->rec.data.fsn;
1449 			}
1450 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1451 				SCTPDBG(SCTP_DEBUG_XXX,
1452 				    "The last fsn is now in place fsn: %u\n",
1453 				    chk->rec.data.fsn);
1454 				control->last_frag_seen = 1;
1455 				if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
1456 					SCTPDBG(SCTP_DEBUG_XXX,
1457 					    "New fsn: %u is not at top_fsn: %u -- abort\n",
1458 					    chk->rec.data.fsn,
1459 					    control->top_fsn);
1460 					sctp_abort_in_reasm(stcb, control, chk,
1461 					    abort_flag,
1462 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1463 					return;
1464 				}
1465 			}
1466 			if (asoc->idata_supported || control->first_frag_seen) {
1467 				/*
1468 				 * For IDATA we always check since we know
1469 				 * that the first fragment is 0. For old
1470 				 * DATA we have to receive the first before
1471 				 * we know the first FSN (which is the TSN).
1472 				 */
1473 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1474 					/*
1475 					 * We have already delivered up to
1476 					 * this so its a dup
1477 					 */
1478 					sctp_abort_in_reasm(stcb, control, chk,
1479 					    abort_flag,
1480 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1481 					return;
1482 				}
1483 			}
1484 		} else {
1485 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1486 				/* Second last? huh? */
1487 				SCTPDBG(SCTP_DEBUG_XXX,
1488 				    "Duplicate last fsn: %u (top: %u) -- abort\n",
1489 				    chk->rec.data.fsn, control->top_fsn);
1490 				sctp_abort_in_reasm(stcb, control,
1491 				    chk, abort_flag,
1492 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1493 				return;
1494 			}
1495 			if (asoc->idata_supported || control->first_frag_seen) {
1496 				/*
1497 				 * For IDATA we always check since we know
1498 				 * that the first fragment is 0. For old
1499 				 * DATA we have to receive the first before
1500 				 * we know the first FSN (which is the TSN).
1501 				 */
1502 
1503 				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1504 					/*
1505 					 * We have already delivered up to
1506 					 * this so its a dup
1507 					 */
1508 					SCTPDBG(SCTP_DEBUG_XXX,
1509 					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1510 					    chk->rec.data.fsn, control->fsn_included);
1511 					sctp_abort_in_reasm(stcb, control, chk,
1512 					    abort_flag,
1513 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1514 					return;
1515 				}
1516 			}
1517 			/*
1518 			 * validate not beyond top FSN if we have seen last
1519 			 * one
1520 			 */
1521 			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1522 				SCTPDBG(SCTP_DEBUG_XXX,
1523 				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1524 				    chk->rec.data.fsn,
1525 				    control->top_fsn);
1526 				sctp_abort_in_reasm(stcb, control, chk,
1527 				    abort_flag,
1528 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1529 				return;
1530 			}
1531 		}
1532 		/*
1533 		 * If we reach here, we need to place the new chunk in the
1534 		 * reassembly for this control.
1535 		 */
1536 		SCTPDBG(SCTP_DEBUG_XXX,
1537 		    "chunk is a not first fsn: %u needs to be inserted\n",
1538 		    chk->rec.data.fsn);
1539 		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1540 			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1541 				if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1542 					/* Last not at the end? huh? */
1543 					SCTPDBG(SCTP_DEBUG_XXX,
1544 					    "Last fragment not last in list: -- abort\n");
1545 					sctp_abort_in_reasm(stcb, control,
1546 					    chk, abort_flag,
1547 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1548 					return;
1549 				}
1550 				/*
1551 				 * This one in queue is bigger than the new
1552 				 * one, insert the new one before at.
1553 				 */
1554 				SCTPDBG(SCTP_DEBUG_XXX,
1555 				    "Insert it before fsn: %u\n",
1556 				    at->rec.data.fsn);
1557 				asoc->size_on_reasm_queue += chk->send_size;
1558 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1559 				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1560 				inserted = 1;
1561 				break;
1562 			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
1563 				/*
1564 				 * Gak, He sent me a duplicate str seq
1565 				 * number
1566 				 */
1567 				/*
1568 				 * foo bar, I guess I will just free this
1569 				 * new guy, should we abort too? FIX ME
1570 				 * MAYBE? Or it COULD be that the SSN's have
1571 				 * wrapped. Maybe I should compare to TSN
1572 				 * somehow... sigh for now just blow away
1573 				 * the chunk!
1574 				 */
1575 				SCTPDBG(SCTP_DEBUG_XXX,
1576 				    "Duplicate to fsn: %u -- abort\n",
1577 				    at->rec.data.fsn);
1578 				sctp_abort_in_reasm(stcb, control,
1579 				    chk, abort_flag,
1580 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1581 				return;
1582 			}
1583 		}
1584 		if (inserted == 0) {
1585 			/* Goes on the end */
1586 			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1587 			    chk->rec.data.fsn);
1588 			asoc->size_on_reasm_queue += chk->send_size;
1589 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1590 			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1591 		}
1592 	}
1593 	/*
1594 	 * Ok lets see if we can suck any up into the control structure that
1595 	 * are in seq if it makes sense.
1596 	 */
1597 	do_wakeup = 0;
1598 	/*
1599 	 * If the first fragment has not been seen there is no sense in
1600 	 * looking.
1601 	 */
1602 	if (control->first_frag_seen) {
1603 		next_fsn = control->fsn_included + 1;
1604 		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1605 			if (at->rec.data.fsn == next_fsn) {
1606 				/* We can add this one now to the control */
1607 				SCTPDBG(SCTP_DEBUG_XXX,
1608 				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1609 				    control, at,
1610 				    at->rec.data.fsn,
1611 				    next_fsn, control->fsn_included);
1612 				TAILQ_REMOVE(&control->reasm, at, sctp_next);
1613 				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1614 				if (control->on_read_q) {
1615 					do_wakeup = 1;
1616 				} else {
1617 					/*
1618 					 * We only add to the
1619 					 * size-on-all-streams if its not on
1620 					 * the read q. The read q flag will
1621 					 * cause a sballoc so its accounted
1622 					 * for there.
1623 					 */
1624 					asoc->size_on_all_streams += lenadded;
1625 				}
1626 				next_fsn++;
1627 				if (control->end_added && control->pdapi_started) {
1628 					if (strm->pd_api_started) {
1629 						strm->pd_api_started = 0;
1630 						control->pdapi_started = 0;
1631 					}
1632 					if (control->on_read_q == 0) {
1633 						sctp_add_to_readq(stcb->sctp_ep, stcb,
1634 						    control,
1635 						    &stcb->sctp_socket->so_rcv, control->end_added,
1636 						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1637 					}
1638 					break;
1639 				}
1640 			} else {
1641 				break;
1642 			}
1643 		}
1644 	}
1645 	if (do_wakeup) {
1646 		/* Need to wakeup the reader */
1647 		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
1648 	}
1649 }
1650 
1651 static struct sctp_queued_to_read *
sctp_find_reasm_entry(struct sctp_stream_in * strm,uint32_t mid,int ordered,int idata_supported)1652 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1653 {
1654 	struct sctp_queued_to_read *control;
1655 
1656 	if (ordered) {
1657 		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1658 			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1659 				break;
1660 			}
1661 		}
1662 	} else {
1663 		if (idata_supported) {
1664 			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1665 				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1666 					break;
1667 				}
1668 			}
1669 		} else {
1670 			control = TAILQ_FIRST(&strm->uno_inqueue);
1671 		}
1672 	}
1673 	return (control);
1674 }
1675 
1676 static int
sctp_process_a_data_chunk(struct sctp_tcb * stcb,struct sctp_association * asoc,struct mbuf ** m,int offset,int chk_length,struct sctp_nets * net,uint32_t * high_tsn,int * abort_flag,int * break_flag,int last_chunk,uint8_t chk_type)1677 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1678     struct mbuf **m, int offset, int chk_length,
1679     struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1680     int *break_flag, int last_chunk, uint8_t chk_type)
1681 {
1682 	struct sctp_tmit_chunk *chk = NULL;	/* make gcc happy */
1683 	struct sctp_stream_in *strm;
1684 	uint32_t tsn, fsn, gap, mid;
1685 	struct mbuf *dmbuf;
1686 	int the_len;
1687 	int need_reasm_check = 0;
1688 	uint16_t sid;
1689 	struct mbuf *op_err;
1690 	char msg[SCTP_DIAG_INFO_LEN];
1691 	struct sctp_queued_to_read *control, *ncontrol;
1692 	uint32_t ppid;
1693 	uint8_t chk_flags;
1694 	struct sctp_stream_reset_list *liste;
1695 	int ordered;
1696 	size_t clen;
1697 	int created_control = 0;
1698 
1699 	if (chk_type == SCTP_IDATA) {
1700 		struct sctp_idata_chunk *chunk, chunk_buf;
1701 
1702 		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1703 		    sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1704 		chk_flags = chunk->ch.chunk_flags;
1705 		clen = sizeof(struct sctp_idata_chunk);
1706 		tsn = ntohl(chunk->dp.tsn);
1707 		sid = ntohs(chunk->dp.sid);
1708 		mid = ntohl(chunk->dp.mid);
1709 		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1710 			fsn = 0;
1711 			ppid = chunk->dp.ppid_fsn.ppid;
1712 		} else {
1713 			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1714 			ppid = 0xffffffff;	/* Use as an invalid value. */
1715 		}
1716 	} else {
1717 		struct sctp_data_chunk *chunk, chunk_buf;
1718 
1719 		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1720 		    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1721 		chk_flags = chunk->ch.chunk_flags;
1722 		clen = sizeof(struct sctp_data_chunk);
1723 		tsn = ntohl(chunk->dp.tsn);
1724 		sid = ntohs(chunk->dp.sid);
1725 		mid = (uint32_t)(ntohs(chunk->dp.ssn));
1726 		fsn = tsn;
1727 		ppid = chunk->dp.ppid;
1728 	}
1729 	if ((size_t)chk_length == clen) {
1730 		/*
1731 		 * Need to send an abort since we had a empty data chunk.
1732 		 */
1733 		op_err = sctp_generate_no_user_data_cause(tsn);
1734 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1735 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
1736 		*abort_flag = 1;
1737 		return (0);
1738 	}
1739 	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1740 		asoc->send_sack = 1;
1741 	}
1742 	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1743 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1744 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1745 	}
1746 	if (stcb == NULL) {
1747 		return (0);
1748 	}
1749 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
1750 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1751 		/* It is a duplicate */
1752 		SCTP_STAT_INCR(sctps_recvdupdata);
1753 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1754 			/* Record a dup for the next outbound sack */
1755 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1756 			asoc->numduptsns++;
1757 		}
1758 		asoc->send_sack = 1;
1759 		return (0);
1760 	}
1761 	/* Calculate the number of TSN's between the base and this TSN */
1762 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1763 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1764 		/* Can't hold the bit in the mapping at max array, toss it */
1765 		return (0);
1766 	}
1767 	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1768 		SCTP_TCB_LOCK_ASSERT(stcb);
1769 		if (sctp_expand_mapping_array(asoc, gap)) {
1770 			/* Can't expand, drop it */
1771 			return (0);
1772 		}
1773 	}
1774 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1775 		*high_tsn = tsn;
1776 	}
1777 	/* See if we have received this one already */
1778 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1779 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1780 		SCTP_STAT_INCR(sctps_recvdupdata);
1781 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1782 			/* Record a dup for the next outbound sack */
1783 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1784 			asoc->numduptsns++;
1785 		}
1786 		asoc->send_sack = 1;
1787 		return (0);
1788 	}
1789 	/*
1790 	 * Check to see about the GONE flag, duplicates would cause a sack
1791 	 * to be sent up above
1792 	 */
1793 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1794 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1795 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1796 		/*
1797 		 * wait a minute, this guy is gone, there is no longer a
1798 		 * receiver. Send peer an ABORT!
1799 		 */
1800 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1801 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
1802 		*abort_flag = 1;
1803 		return (0);
1804 	}
1805 	/*
1806 	 * Now before going further we see if there is room. If NOT then we
1807 	 * MAY let one through only IF this TSN is the one we are waiting
1808 	 * for on a partial delivery API.
1809 	 */
1810 
1811 	/* Is the stream valid? */
1812 	if (sid >= asoc->streamincnt) {
1813 		struct sctp_error_invalid_stream *cause;
1814 
1815 		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1816 		    0, M_NOWAIT, 1, MT_DATA);
1817 		if (op_err != NULL) {
1818 			/* add some space up front so prepend will work well */
1819 			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1820 			cause = mtod(op_err, struct sctp_error_invalid_stream *);
1821 			/*
1822 			 * Error causes are just param's and this one has
1823 			 * two back to back phdr, one with the error type
1824 			 * and size, the other with the streamid and a rsvd
1825 			 */
1826 			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1827 			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1828 			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1829 			cause->stream_id = htons(sid);
1830 			cause->reserved = htons(0);
1831 			sctp_queue_op_err(stcb, op_err);
1832 		}
1833 		SCTP_STAT_INCR(sctps_badsid);
1834 		SCTP_TCB_LOCK_ASSERT(stcb);
1835 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1836 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1837 			asoc->highest_tsn_inside_nr_map = tsn;
1838 		}
1839 		if (tsn == (asoc->cumulative_tsn + 1)) {
1840 			/* Update cum-ack */
1841 			asoc->cumulative_tsn = tsn;
1842 		}
1843 		return (0);
1844 	}
1845 	/*
1846 	 * If its a fragmented message, lets see if we can find the control
1847 	 * on the reassembly queues.
1848 	 */
1849 	if ((chk_type == SCTP_IDATA) &&
1850 	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1851 	    (fsn == 0)) {
1852 		/*
1853 		 * The first *must* be fsn 0, and other (middle/end) pieces
1854 		 * can *not* be fsn 0. XXX: This can happen in case of a
1855 		 * wrap around. Ignore is for now.
1856 		 */
1857 		SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
1858 		goto err_out;
1859 	}
1860 	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1861 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1862 	    chk_flags, control);
1863 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1864 		/* See if we can find the re-assembly entity */
1865 		if (control != NULL) {
1866 			/* We found something, does it belong? */
1867 			if (ordered && (mid != control->mid)) {
1868 				SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1869 		err_out:
1870 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1871 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1872 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
1873 				*abort_flag = 1;
1874 				return (0);
1875 			}
1876 			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1877 				/*
1878 				 * We can't have a switched order with an
1879 				 * unordered chunk
1880 				 */
1881 				SCTP_SNPRINTF(msg, sizeof(msg),
1882 				    "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1883 				    tsn);
1884 				goto err_out;
1885 			}
1886 			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1887 				/*
1888 				 * We can't have a switched unordered with a
1889 				 * ordered chunk
1890 				 */
1891 				SCTP_SNPRINTF(msg, sizeof(msg),
1892 				    "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1893 				    tsn);
1894 				goto err_out;
1895 			}
1896 		}
1897 	} else {
1898 		/*
1899 		 * Its a complete segment. Lets validate we don't have a
1900 		 * re-assembly going on with the same Stream/Seq (for
1901 		 * ordered) or in the same Stream for unordered.
1902 		 */
1903 		if (control != NULL) {
1904 			if (ordered || asoc->idata_supported) {
1905 				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1906 				    chk_flags, mid);
1907 				SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1908 				goto err_out;
1909 			} else {
1910 				if ((control->first_frag_seen) &&
1911 				    (tsn == control->fsn_included + 1) &&
1912 				    (control->end_added == 0)) {
1913 					SCTP_SNPRINTF(msg, sizeof(msg),
1914 					    "Illegal message sequence, missing end for MID: %8.8x",
1915 					    control->fsn_included);
1916 					goto err_out;
1917 				} else {
1918 					control = NULL;
1919 				}
1920 			}
1921 		}
1922 	}
1923 	/* now do the tests */
1924 	if (((asoc->cnt_on_all_streams +
1925 	    asoc->cnt_on_reasm_queue +
1926 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1927 	    (((int)asoc->my_rwnd) <= 0)) {
1928 		/*
1929 		 * When we have NO room in the rwnd we check to make sure
1930 		 * the reader is doing its job...
1931 		 */
1932 		if (SCTP_SBAVAIL(&stcb->sctp_socket->so_rcv) > 0) {
1933 			/* some to read, wake-up */
1934 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1935 		}
1936 		/* now is it in the mapping array of what we have accepted? */
1937 		if (chk_type == SCTP_DATA) {
1938 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1939 			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1940 				/* Nope not in the valid range dump it */
1941 		dump_packet:
1942 				sctp_set_rwnd(stcb, asoc);
1943 				if ((asoc->cnt_on_all_streams +
1944 				    asoc->cnt_on_reasm_queue +
1945 				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1946 					SCTP_STAT_INCR(sctps_datadropchklmt);
1947 				} else {
1948 					SCTP_STAT_INCR(sctps_datadroprwnd);
1949 				}
1950 				*break_flag = 1;
1951 				return (0);
1952 			}
1953 		} else {
1954 			if (control == NULL) {
1955 				goto dump_packet;
1956 			}
1957 			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
1958 				goto dump_packet;
1959 			}
1960 		}
1961 	}
1962 #ifdef SCTP_ASOCLOG_OF_TSNS
1963 	SCTP_TCB_LOCK_ASSERT(stcb);
1964 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1965 		asoc->tsn_in_at = 0;
1966 		asoc->tsn_in_wrapped = 1;
1967 	}
1968 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1969 	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1970 	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1971 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1972 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1973 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1974 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1975 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1976 	asoc->tsn_in_at++;
1977 #endif
1978 	/*
1979 	 * Before we continue lets validate that we are not being fooled by
1980 	 * an evil attacker. We can only have Nk chunks based on our TSN
1981 	 * spread allowed by the mapping array N * 8 bits, so there is no
1982 	 * way our stream sequence numbers could have wrapped. We of course
1983 	 * only validate the FIRST fragment so the bit must be set.
1984 	 */
1985 	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
1986 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1987 	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
1988 	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
1989 		/* The incoming sseq is behind where we last delivered? */
1990 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1991 		    mid, asoc->strmin[sid].last_mid_delivered);
1992 
1993 		if (asoc->idata_supported) {
1994 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
1995 			    asoc->strmin[sid].last_mid_delivered,
1996 			    tsn,
1997 			    sid,
1998 			    mid);
1999 		} else {
2000 			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2001 			    (uint16_t)asoc->strmin[sid].last_mid_delivered,
2002 			    tsn,
2003 			    sid,
2004 			    (uint16_t)mid);
2005 		}
2006 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2007 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2008 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2009 		*abort_flag = 1;
2010 		return (0);
2011 	}
2012 	if (chk_type == SCTP_IDATA) {
2013 		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2014 	} else {
2015 		the_len = (chk_length - sizeof(struct sctp_data_chunk));
2016 	}
2017 	if (last_chunk == 0) {
2018 		if (chk_type == SCTP_IDATA) {
2019 			dmbuf = SCTP_M_COPYM(*m,
2020 			    (offset + sizeof(struct sctp_idata_chunk)),
2021 			    the_len, M_NOWAIT);
2022 		} else {
2023 			dmbuf = SCTP_M_COPYM(*m,
2024 			    (offset + sizeof(struct sctp_data_chunk)),
2025 			    the_len, M_NOWAIT);
2026 		}
2027 #ifdef SCTP_MBUF_LOGGING
2028 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2029 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2030 		}
2031 #endif
2032 	} else {
2033 		/* We can steal the last chunk */
2034 		int l_len;
2035 
2036 		dmbuf = *m;
2037 		/* lop off the top part */
2038 		if (chk_type == SCTP_IDATA) {
2039 			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2040 		} else {
2041 			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2042 		}
2043 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2044 			l_len = SCTP_BUF_LEN(dmbuf);
2045 		} else {
2046 			/*
2047 			 * need to count up the size hopefully does not hit
2048 			 * this to often :-0
2049 			 */
2050 			struct mbuf *lat;
2051 
2052 			l_len = 0;
2053 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2054 				l_len += SCTP_BUF_LEN(lat);
2055 			}
2056 		}
2057 		if (l_len > the_len) {
2058 			/* Trim the end round bytes off  too */
2059 			m_adj(dmbuf, -(l_len - the_len));
2060 		}
2061 	}
2062 	if (dmbuf == NULL) {
2063 		SCTP_STAT_INCR(sctps_nomem);
2064 		return (0);
2065 	}
2066 	/*
2067 	 * Now no matter what, we need a control, get one if we don't have
2068 	 * one (we may have gotten it above when we found the message was
2069 	 * fragmented
2070 	 */
2071 	if (control == NULL) {
2072 		sctp_alloc_a_readq(stcb, control);
2073 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2074 		    ppid,
2075 		    sid,
2076 		    chk_flags,
2077 		    NULL, fsn, mid);
2078 		if (control == NULL) {
2079 			SCTP_STAT_INCR(sctps_nomem);
2080 			return (0);
2081 		}
2082 		if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2083 			struct mbuf *mm;
2084 
2085 			control->data = dmbuf;
2086 			control->tail_mbuf = NULL;
2087 			for (mm = control->data; mm; mm = mm->m_next) {
2088 				control->length += SCTP_BUF_LEN(mm);
2089 				if (SCTP_BUF_NEXT(mm) == NULL) {
2090 					control->tail_mbuf = mm;
2091 				}
2092 			}
2093 			control->end_added = 1;
2094 			control->last_frag_seen = 1;
2095 			control->first_frag_seen = 1;
2096 			control->fsn_included = fsn;
2097 			control->top_fsn = fsn;
2098 		}
2099 		created_control = 1;
2100 	}
2101 	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2102 	    chk_flags, ordered, mid, control);
2103 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2104 	    TAILQ_EMPTY(&asoc->resetHead) &&
2105 	    ((ordered == 0) ||
2106 	    (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2107 	    TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2108 		/* Candidate for express delivery */
2109 		/*
2110 		 * Its not fragmented, No PD-API is up, Nothing in the
2111 		 * delivery queue, Its un-ordered OR ordered and the next to
2112 		 * deliver AND nothing else is stuck on the stream queue,
2113 		 * And there is room for it in the socket buffer. Lets just
2114 		 * stuff it up the buffer....
2115 		 */
2116 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2117 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2118 			asoc->highest_tsn_inside_nr_map = tsn;
2119 		}
2120 		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2121 		    control, mid);
2122 
2123 		sctp_add_to_readq(stcb->sctp_ep, stcb,
2124 		    control, &stcb->sctp_socket->so_rcv,
2125 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2126 
2127 		if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2128 			/* for ordered, bump what we delivered */
2129 			asoc->strmin[sid].last_mid_delivered++;
2130 		}
2131 		SCTP_STAT_INCR(sctps_recvexpress);
2132 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2133 			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2134 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
2135 		}
2136 		control = NULL;
2137 		goto finish_express_del;
2138 	}
2139 
2140 	/* Now will we need a chunk too? */
2141 	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2142 		sctp_alloc_a_chunk(stcb, chk);
2143 		if (chk == NULL) {
2144 			/* No memory so we drop the chunk */
2145 			SCTP_STAT_INCR(sctps_nomem);
2146 			if (last_chunk == 0) {
2147 				/* we copied it, free the copy */
2148 				sctp_m_freem(dmbuf);
2149 			}
2150 			return (0);
2151 		}
2152 		chk->rec.data.tsn = tsn;
2153 		chk->no_fr_allowed = 0;
2154 		chk->rec.data.fsn = fsn;
2155 		chk->rec.data.mid = mid;
2156 		chk->rec.data.sid = sid;
2157 		chk->rec.data.ppid = ppid;
2158 		chk->rec.data.context = stcb->asoc.context;
2159 		chk->rec.data.doing_fast_retransmit = 0;
2160 		chk->rec.data.rcv_flags = chk_flags;
2161 		chk->asoc = asoc;
2162 		chk->send_size = the_len;
2163 		chk->whoTo = net;
2164 		SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2165 		    chk,
2166 		    control, mid);
2167 		atomic_add_int(&net->ref_count, 1);
2168 		chk->data = dmbuf;
2169 	}
2170 	/* Set the appropriate TSN mark */
2171 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2172 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2173 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2174 			asoc->highest_tsn_inside_nr_map = tsn;
2175 		}
2176 	} else {
2177 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2178 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2179 			asoc->highest_tsn_inside_map = tsn;
2180 		}
2181 	}
2182 	/* Now is it complete (i.e. not fragmented)? */
2183 	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2184 		/*
2185 		 * Special check for when streams are resetting. We could be
2186 		 * more smart about this and check the actual stream to see
2187 		 * if it is not being reset.. that way we would not create a
2188 		 * HOLB when amongst streams being reset and those not being
2189 		 * reset.
2190 		 *
2191 		 */
2192 		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2193 		    SCTP_TSN_GT(tsn, liste->tsn)) {
2194 			/*
2195 			 * yep its past where we need to reset... go ahead
2196 			 * and queue it.
2197 			 */
2198 			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2199 				/* first one on */
2200 				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2201 			} else {
2202 				struct sctp_queued_to_read *lcontrol, *nlcontrol;
2203 				unsigned char inserted = 0;
2204 
2205 				TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2206 					if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2207 						continue;
2208 					} else {
2209 						/* found it */
2210 						TAILQ_INSERT_BEFORE(lcontrol, control, next);
2211 						inserted = 1;
2212 						break;
2213 					}
2214 				}
2215 				if (inserted == 0) {
2216 					/*
2217 					 * must be put at end, use prevP
2218 					 * (all setup from loop) to setup
2219 					 * nextP.
2220 					 */
2221 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2222 				}
2223 			}
2224 			goto finish_express_del;
2225 		}
2226 		if (chk_flags & SCTP_DATA_UNORDERED) {
2227 			/* queue directly into socket buffer */
2228 			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2229 			    control, mid);
2230 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2231 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2232 			    control,
2233 			    &stcb->sctp_socket->so_rcv, 1,
2234 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2235 
2236 		} else {
2237 			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2238 			    mid);
2239 			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2240 			if (*abort_flag) {
2241 				if (last_chunk) {
2242 					*m = NULL;
2243 				}
2244 				return (0);
2245 			}
2246 		}
2247 		goto finish_express_del;
2248 	}
2249 	/* If we reach here its a reassembly */
2250 	need_reasm_check = 1;
2251 	SCTPDBG(SCTP_DEBUG_XXX,
2252 	    "Queue data to stream for reasm control: %p MID: %u\n",
2253 	    control, mid);
2254 	sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2255 	if (*abort_flag) {
2256 		/*
2257 		 * the assoc is now gone and chk was put onto the reasm
2258 		 * queue, which has all been freed.
2259 		 */
2260 		if (last_chunk) {
2261 			*m = NULL;
2262 		}
2263 		return (0);
2264 	}
2265 finish_express_del:
2266 	/* Here we tidy up things */
2267 	if (tsn == (asoc->cumulative_tsn + 1)) {
2268 		/* Update cum-ack */
2269 		asoc->cumulative_tsn = tsn;
2270 	}
2271 	if (last_chunk) {
2272 		*m = NULL;
2273 	}
2274 	if (ordered) {
2275 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2276 	} else {
2277 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2278 	}
2279 	SCTP_STAT_INCR(sctps_recvdata);
2280 	/* Set it present please */
2281 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2282 		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2283 	}
2284 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2285 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2286 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2287 	}
2288 	if (need_reasm_check) {
2289 		(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2290 		need_reasm_check = 0;
2291 	}
2292 	/* check the special flag for stream resets */
2293 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2294 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2295 		/*
2296 		 * we have finished working through the backlogged TSN's now
2297 		 * time to reset streams. 1: call reset function. 2: free
2298 		 * pending_reply space 3: distribute any chunks in
2299 		 * pending_reply_queue.
2300 		 */
2301 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2302 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2303 		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2304 		SCTP_FREE(liste, SCTP_M_STRESET);
2305 		/* sa_ignore FREED_MEMORY */
2306 		liste = TAILQ_FIRST(&asoc->resetHead);
2307 		if (TAILQ_EMPTY(&asoc->resetHead)) {
2308 			/* All can be removed */
2309 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2310 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2311 				strm = &asoc->strmin[control->sinfo_stream];
2312 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2313 				if (*abort_flag) {
2314 					return (0);
2315 				}
2316 				if (need_reasm_check) {
2317 					(void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2318 					need_reasm_check = 0;
2319 				}
2320 			}
2321 		} else {
2322 			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2323 				if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2324 					break;
2325 				}
2326 				/*
2327 				 * if control->sinfo_tsn is <= liste->tsn we
2328 				 * can process it which is the NOT of
2329 				 * control->sinfo_tsn > liste->tsn
2330 				 */
2331 				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2332 				strm = &asoc->strmin[control->sinfo_stream];
2333 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2334 				if (*abort_flag) {
2335 					return (0);
2336 				}
2337 				if (need_reasm_check) {
2338 					(void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2339 					need_reasm_check = 0;
2340 				}
2341 			}
2342 		}
2343 	}
2344 	return (1);
2345 }
2346 
2347 static const int8_t sctp_map_lookup_tab[256] = {
2348 	0, 1, 0, 2, 0, 1, 0, 3,
2349 	0, 1, 0, 2, 0, 1, 0, 4,
2350 	0, 1, 0, 2, 0, 1, 0, 3,
2351 	0, 1, 0, 2, 0, 1, 0, 5,
2352 	0, 1, 0, 2, 0, 1, 0, 3,
2353 	0, 1, 0, 2, 0, 1, 0, 4,
2354 	0, 1, 0, 2, 0, 1, 0, 3,
2355 	0, 1, 0, 2, 0, 1, 0, 6,
2356 	0, 1, 0, 2, 0, 1, 0, 3,
2357 	0, 1, 0, 2, 0, 1, 0, 4,
2358 	0, 1, 0, 2, 0, 1, 0, 3,
2359 	0, 1, 0, 2, 0, 1, 0, 5,
2360 	0, 1, 0, 2, 0, 1, 0, 3,
2361 	0, 1, 0, 2, 0, 1, 0, 4,
2362 	0, 1, 0, 2, 0, 1, 0, 3,
2363 	0, 1, 0, 2, 0, 1, 0, 7,
2364 	0, 1, 0, 2, 0, 1, 0, 3,
2365 	0, 1, 0, 2, 0, 1, 0, 4,
2366 	0, 1, 0, 2, 0, 1, 0, 3,
2367 	0, 1, 0, 2, 0, 1, 0, 5,
2368 	0, 1, 0, 2, 0, 1, 0, 3,
2369 	0, 1, 0, 2, 0, 1, 0, 4,
2370 	0, 1, 0, 2, 0, 1, 0, 3,
2371 	0, 1, 0, 2, 0, 1, 0, 6,
2372 	0, 1, 0, 2, 0, 1, 0, 3,
2373 	0, 1, 0, 2, 0, 1, 0, 4,
2374 	0, 1, 0, 2, 0, 1, 0, 3,
2375 	0, 1, 0, 2, 0, 1, 0, 5,
2376 	0, 1, 0, 2, 0, 1, 0, 3,
2377 	0, 1, 0, 2, 0, 1, 0, 4,
2378 	0, 1, 0, 2, 0, 1, 0, 3,
2379 	0, 1, 0, 2, 0, 1, 0, 8
2380 };
2381 
2382 void
sctp_slide_mapping_arrays(struct sctp_tcb * stcb)2383 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2384 {
2385 	/*
2386 	 * Now we also need to check the mapping array in a couple of ways.
2387 	 * 1) Did we move the cum-ack point?
2388 	 *
2389 	 * When you first glance at this you might think that all entries
2390 	 * that make up the position of the cum-ack would be in the
2391 	 * nr-mapping array only.. i.e. things up to the cum-ack are always
2392 	 * deliverable. Thats true with one exception, when its a fragmented
2393 	 * message we may not deliver the data until some threshold (or all
2394 	 * of it) is in place. So we must OR the nr_mapping_array and
2395 	 * mapping_array to get a true picture of the cum-ack.
2396 	 */
2397 	struct sctp_association *asoc;
2398 	int at;
2399 	uint8_t val;
2400 	int slide_from, slide_end, lgap, distance;
2401 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
2402 
2403 	asoc = &stcb->asoc;
2404 
2405 	old_cumack = asoc->cumulative_tsn;
2406 	old_base = asoc->mapping_array_base_tsn;
2407 	old_highest = asoc->highest_tsn_inside_map;
2408 	/*
2409 	 * We could probably improve this a small bit by calculating the
2410 	 * offset of the current cum-ack as the starting point.
2411 	 */
2412 	at = 0;
2413 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2414 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2415 		if (val == 0xff) {
2416 			at += 8;
2417 		} else {
2418 			/* there is a 0 bit */
2419 			at += sctp_map_lookup_tab[val];
2420 			break;
2421 		}
2422 	}
2423 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2424 
2425 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2426 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2427 #ifdef INVARIANTS
2428 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2429 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2430 #else
2431 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2432 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2433 		sctp_print_mapping_array(asoc);
2434 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2435 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2436 		}
2437 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2438 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2439 #endif
2440 	}
2441 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2442 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2443 	} else {
2444 		highest_tsn = asoc->highest_tsn_inside_map;
2445 	}
2446 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2447 		/* The complete array was completed by a single FR */
2448 		/* highest becomes the cum-ack */
2449 		int clr;
2450 #ifdef INVARIANTS
2451 		unsigned int i;
2452 #endif
2453 
2454 		/* clear the array */
2455 		clr = ((at + 7) >> 3);
2456 		if (clr > asoc->mapping_array_size) {
2457 			clr = asoc->mapping_array_size;
2458 		}
2459 		memset(asoc->mapping_array, 0, clr);
2460 		memset(asoc->nr_mapping_array, 0, clr);
2461 #ifdef INVARIANTS
2462 		for (i = 0; i < asoc->mapping_array_size; i++) {
2463 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2464 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2465 				sctp_print_mapping_array(asoc);
2466 			}
2467 		}
2468 #endif
2469 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2470 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2471 	} else if (at >= 8) {
2472 		/* we can slide the mapping array down */
2473 		/* slide_from holds where we hit the first NON 0xff byte */
2474 
2475 		/*
2476 		 * now calculate the ceiling of the move using our highest
2477 		 * TSN value
2478 		 */
2479 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2480 		slide_end = (lgap >> 3);
2481 		if (slide_end < slide_from) {
2482 			sctp_print_mapping_array(asoc);
2483 #ifdef INVARIANTS
2484 			panic("impossible slide");
2485 #else
2486 			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2487 			    lgap, slide_end, slide_from, at);
2488 			return;
2489 #endif
2490 		}
2491 		if (slide_end > asoc->mapping_array_size) {
2492 #ifdef INVARIANTS
2493 			panic("would overrun buffer");
2494 #else
2495 			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2496 			    asoc->mapping_array_size, slide_end);
2497 			slide_end = asoc->mapping_array_size;
2498 #endif
2499 		}
2500 		distance = (slide_end - slide_from) + 1;
2501 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2502 			sctp_log_map(old_base, old_cumack, old_highest,
2503 			    SCTP_MAP_PREPARE_SLIDE);
2504 			sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2505 			    (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2506 		}
2507 		if (distance + slide_from > asoc->mapping_array_size ||
2508 		    distance < 0) {
2509 			/*
2510 			 * Here we do NOT slide forward the array so that
2511 			 * hopefully when more data comes in to fill it up
2512 			 * we will be able to slide it forward. Really I
2513 			 * don't think this should happen :-0
2514 			 */
2515 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2516 				sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2517 				    (uint32_t)asoc->mapping_array_size,
2518 				    SCTP_MAP_SLIDE_NONE);
2519 			}
2520 		} else {
2521 			int ii;
2522 
2523 			for (ii = 0; ii < distance; ii++) {
2524 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2525 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2526 			}
2527 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2528 				asoc->mapping_array[ii] = 0;
2529 				asoc->nr_mapping_array[ii] = 0;
2530 			}
2531 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2532 				asoc->highest_tsn_inside_map += (slide_from << 3);
2533 			}
2534 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2535 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2536 			}
2537 			asoc->mapping_array_base_tsn += (slide_from << 3);
2538 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2539 				sctp_log_map(asoc->mapping_array_base_tsn,
2540 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2541 				    SCTP_MAP_SLIDE_RESULT);
2542 			}
2543 		}
2544 	}
2545 }
2546 
2547 void
sctp_sack_check(struct sctp_tcb * stcb,int was_a_gap)2548 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2549 {
2550 	struct sctp_association *asoc;
2551 	uint32_t highest_tsn;
2552 	int is_a_gap;
2553 
2554 	sctp_slide_mapping_arrays(stcb);
2555 	asoc = &stcb->asoc;
2556 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2557 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2558 	} else {
2559 		highest_tsn = asoc->highest_tsn_inside_map;
2560 	}
2561 	/* Is there a gap now? */
2562 	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2563 
2564 	/*
2565 	 * Now we need to see if we need to queue a sack or just start the
2566 	 * timer (if allowed).
2567 	 */
2568 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2569 		/*
2570 		 * Ok special case, in SHUTDOWN-SENT case. here we maker
2571 		 * sure SACK timer is off and instead send a SHUTDOWN and a
2572 		 * SACK
2573 		 */
2574 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2575 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2576 			    stcb->sctp_ep, stcb, NULL,
2577 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2578 		}
2579 		sctp_send_shutdown(stcb,
2580 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2581 		if (is_a_gap) {
2582 			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2583 		}
2584 	} else {
2585 		/*
2586 		 * CMT DAC algorithm: increase number of packets received
2587 		 * since last ack
2588 		 */
2589 		stcb->asoc.cmt_dac_pkts_rcvd++;
2590 
2591 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2592 							 * SACK */
2593 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2594 							 * longer is one */
2595 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2596 		    (is_a_gap) ||	/* is still a gap */
2597 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2598 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)) {	/* hit limit of pkts */
2599 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2600 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2601 			    (stcb->asoc.send_sack == 0) &&
2602 			    (stcb->asoc.numduptsns == 0) &&
2603 			    (stcb->asoc.delayed_ack) &&
2604 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2605 				/*
2606 				 * CMT DAC algorithm: With CMT, delay acks
2607 				 * even in the face of reordering.
2608 				 * Therefore, if acks that do not have to be
2609 				 * sent because of the above reasons, will
2610 				 * be delayed. That is, acks that would have
2611 				 * been sent due to gap reports will be
2612 				 * delayed with DAC. Start the delayed ack
2613 				 * timer.
2614 				 */
2615 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2616 				    stcb->sctp_ep, stcb, NULL);
2617 			} else {
2618 				/*
2619 				 * Ok we must build a SACK since the timer
2620 				 * is pending, we got our first packet OR
2621 				 * there are gaps or duplicates.
2622 				 */
2623 				sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
2624 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
2625 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2626 			}
2627 		} else {
2628 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2629 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2630 				    stcb->sctp_ep, stcb, NULL);
2631 			}
2632 		}
2633 	}
2634 }
2635 
2636 int
sctp_process_data(struct mbuf ** mm,int iphlen,int * offset,int length,struct sctp_inpcb * inp,struct sctp_tcb * stcb,struct sctp_nets * net,uint32_t * high_tsn)2637 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2638     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2639     struct sctp_nets *net, uint32_t *high_tsn)
2640 {
2641 	struct sctp_chunkhdr *ch, chunk_buf;
2642 	struct sctp_association *asoc;
2643 	int num_chunks = 0;	/* number of control chunks processed */
2644 	int stop_proc = 0;
2645 	int break_flag, last_chunk;
2646 	int abort_flag = 0, was_a_gap;
2647 	struct mbuf *m;
2648 	uint32_t highest_tsn;
2649 	uint16_t chk_length;
2650 
2651 	/* set the rwnd */
2652 	sctp_set_rwnd(stcb, &stcb->asoc);
2653 
2654 	m = *mm;
2655 	SCTP_TCB_LOCK_ASSERT(stcb);
2656 	asoc = &stcb->asoc;
2657 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2658 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2659 	} else {
2660 		highest_tsn = asoc->highest_tsn_inside_map;
2661 	}
2662 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2663 	/*
2664 	 * setup where we got the last DATA packet from for any SACK that
2665 	 * may need to go out. Don't bump the net. This is done ONLY when a
2666 	 * chunk is assigned.
2667 	 */
2668 	asoc->last_data_chunk_from = net;
2669 
2670 	/*-
2671 	 * Now before we proceed we must figure out if this is a wasted
2672 	 * cluster... i.e. it is a small packet sent in and yet the driver
2673 	 * underneath allocated a full cluster for it. If so we must copy it
2674 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2675 	 * with cluster starvation.
2676 	 */
2677 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2678 		/* we only handle mbufs that are singletons.. not chains */
2679 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2680 		if (m) {
2681 			/* ok lets see if we can copy the data up */
2682 			caddr_t *from, *to;
2683 
2684 			/* get the pointers and copy */
2685 			to = mtod(m, caddr_t *);
2686 			from = mtod((*mm), caddr_t *);
2687 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2688 			/* copy the length and free up the old */
2689 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2690 			sctp_m_freem(*mm);
2691 			/* success, back copy */
2692 			*mm = m;
2693 		} else {
2694 			/* We are in trouble in the mbuf world .. yikes */
2695 			m = *mm;
2696 		}
2697 	}
2698 	/* get pointer to the first chunk header */
2699 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2700 	    sizeof(struct sctp_chunkhdr),
2701 	    (uint8_t *)&chunk_buf);
2702 	if (ch == NULL) {
2703 		return (1);
2704 	}
2705 	/*
2706 	 * process all DATA chunks...
2707 	 */
2708 	*high_tsn = asoc->cumulative_tsn;
2709 	break_flag = 0;
2710 	asoc->data_pkts_seen++;
2711 	while (stop_proc == 0) {
2712 		/* validate chunk length */
2713 		chk_length = ntohs(ch->chunk_length);
2714 		if (length - *offset < chk_length) {
2715 			/* all done, mutulated chunk */
2716 			stop_proc = 1;
2717 			continue;
2718 		}
2719 		if ((asoc->idata_supported == 1) &&
2720 		    (ch->chunk_type == SCTP_DATA)) {
2721 			struct mbuf *op_err;
2722 			char msg[SCTP_DIAG_INFO_LEN];
2723 
2724 			SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2725 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2726 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2727 			sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2728 			return (2);
2729 		}
2730 		if ((asoc->idata_supported == 0) &&
2731 		    (ch->chunk_type == SCTP_IDATA)) {
2732 			struct mbuf *op_err;
2733 			char msg[SCTP_DIAG_INFO_LEN];
2734 
2735 			SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2736 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2737 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2738 			sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2739 			return (2);
2740 		}
2741 		if ((ch->chunk_type == SCTP_DATA) ||
2742 		    (ch->chunk_type == SCTP_IDATA)) {
2743 			uint16_t clen;
2744 
2745 			if (ch->chunk_type == SCTP_DATA) {
2746 				clen = sizeof(struct sctp_data_chunk);
2747 			} else {
2748 				clen = sizeof(struct sctp_idata_chunk);
2749 			}
2750 			if (chk_length < clen) {
2751 				/*
2752 				 * Need to send an abort since we had a
2753 				 * invalid data chunk.
2754 				 */
2755 				struct mbuf *op_err;
2756 				char msg[SCTP_DIAG_INFO_LEN];
2757 
2758 				SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
2759 				    ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2760 				    chk_length);
2761 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2762 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
2763 				sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2764 				return (2);
2765 			}
2766 #ifdef SCTP_AUDITING_ENABLED
2767 			sctp_audit_log(0xB1, 0);
2768 #endif
2769 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2770 				last_chunk = 1;
2771 			} else {
2772 				last_chunk = 0;
2773 			}
2774 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2775 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2776 			    last_chunk, ch->chunk_type)) {
2777 				num_chunks++;
2778 			}
2779 			if (abort_flag)
2780 				return (2);
2781 
2782 			if (break_flag) {
2783 				/*
2784 				 * Set because of out of rwnd space and no
2785 				 * drop rep space left.
2786 				 */
2787 				stop_proc = 1;
2788 				continue;
2789 			}
2790 		} else {
2791 			/* not a data chunk in the data region */
2792 			switch (ch->chunk_type) {
2793 			case SCTP_INITIATION:
2794 			case SCTP_INITIATION_ACK:
2795 			case SCTP_SELECTIVE_ACK:
2796 			case SCTP_NR_SELECTIVE_ACK:
2797 			case SCTP_HEARTBEAT_REQUEST:
2798 			case SCTP_HEARTBEAT_ACK:
2799 			case SCTP_ABORT_ASSOCIATION:
2800 			case SCTP_SHUTDOWN:
2801 			case SCTP_SHUTDOWN_ACK:
2802 			case SCTP_OPERATION_ERROR:
2803 			case SCTP_COOKIE_ECHO:
2804 			case SCTP_COOKIE_ACK:
2805 			case SCTP_ECN_ECHO:
2806 			case SCTP_ECN_CWR:
2807 			case SCTP_SHUTDOWN_COMPLETE:
2808 			case SCTP_AUTHENTICATION:
2809 			case SCTP_ASCONF_ACK:
2810 			case SCTP_PACKET_DROPPED:
2811 			case SCTP_STREAM_RESET:
2812 			case SCTP_FORWARD_CUM_TSN:
2813 			case SCTP_ASCONF:
2814 				{
2815 					/*
2816 					 * Now, what do we do with KNOWN
2817 					 * chunks that are NOT in the right
2818 					 * place?
2819 					 *
2820 					 * For now, I do nothing but ignore
2821 					 * them. We may later want to add
2822 					 * sysctl stuff to switch out and do
2823 					 * either an ABORT() or possibly
2824 					 * process them.
2825 					 */
2826 					struct mbuf *op_err;
2827 					char msg[SCTP_DIAG_INFO_LEN];
2828 
2829 					SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2830 					    ch->chunk_type);
2831 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2832 					sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2833 					return (2);
2834 				}
2835 			default:
2836 				/*
2837 				 * Unknown chunk type: use bit rules after
2838 				 * checking length
2839 				 */
2840 				if (chk_length < sizeof(struct sctp_chunkhdr)) {
2841 					/*
2842 					 * Need to send an abort since we
2843 					 * had a invalid chunk.
2844 					 */
2845 					struct mbuf *op_err;
2846 					char msg[SCTP_DIAG_INFO_LEN];
2847 
2848 					SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length);
2849 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2850 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
2851 					sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2852 					return (2);
2853 				}
2854 				if (ch->chunk_type & 0x40) {
2855 					/* Add a error report to the queue */
2856 					struct mbuf *op_err;
2857 					struct sctp_gen_error_cause *cause;
2858 
2859 					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2860 					    0, M_NOWAIT, 1, MT_DATA);
2861 					if (op_err != NULL) {
2862 						cause = mtod(op_err, struct sctp_gen_error_cause *);
2863 						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2864 						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2865 						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2866 						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2867 						if (SCTP_BUF_NEXT(op_err) != NULL) {
2868 							sctp_queue_op_err(stcb, op_err);
2869 						} else {
2870 							sctp_m_freem(op_err);
2871 						}
2872 					}
2873 				}
2874 				if ((ch->chunk_type & 0x80) == 0) {
2875 					/* discard the rest of this packet */
2876 					stop_proc = 1;
2877 				}	/* else skip this bad chunk and
2878 					 * continue... */
2879 				break;
2880 			}	/* switch of chunk type */
2881 		}
2882 		*offset += SCTP_SIZE32(chk_length);
2883 		if ((*offset >= length) || stop_proc) {
2884 			/* no more data left in the mbuf chain */
2885 			stop_proc = 1;
2886 			continue;
2887 		}
2888 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2889 		    sizeof(struct sctp_chunkhdr),
2890 		    (uint8_t *)&chunk_buf);
2891 		if (ch == NULL) {
2892 			*offset = length;
2893 			stop_proc = 1;
2894 			continue;
2895 		}
2896 	}
2897 	if (break_flag) {
2898 		/*
2899 		 * we need to report rwnd overrun drops.
2900 		 */
2901 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2902 	}
2903 	if (num_chunks) {
2904 		/*
2905 		 * Did we get data, if so update the time for auto-close and
2906 		 * give peer credit for being alive.
2907 		 */
2908 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2909 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2910 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2911 			    stcb->asoc.overall_error_count,
2912 			    0,
2913 			    SCTP_FROM_SCTP_INDATA,
2914 			    __LINE__);
2915 		}
2916 		stcb->asoc.overall_error_count = 0;
2917 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2918 	}
2919 	/* now service all of the reassm queue if needed */
2920 	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2921 		/* Assure that we ack right away */
2922 		stcb->asoc.send_sack = 1;
2923 	}
2924 	/* Start a sack timer or QUEUE a SACK for sending */
2925 	sctp_sack_check(stcb, was_a_gap);
2926 	return (0);
2927 }
2928 
2929 static int
sctp_process_segment_range(struct sctp_tcb * stcb,struct sctp_tmit_chunk ** p_tp1,uint32_t last_tsn,uint16_t frag_strt,uint16_t frag_end,int nr_sacking,int * num_frs,uint32_t * biggest_newly_acked_tsn,uint32_t * this_sack_lowest_newack,int * rto_ok)2930 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2931     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2932     int *num_frs,
2933     uint32_t *biggest_newly_acked_tsn,
2934     uint32_t *this_sack_lowest_newack,
2935     int *rto_ok)
2936 {
2937 	struct sctp_tmit_chunk *tp1;
2938 	unsigned int theTSN;
2939 	int j, wake_him = 0, circled = 0;
2940 
2941 	/* Recover the tp1 we last saw */
2942 	tp1 = *p_tp1;
2943 	if (tp1 == NULL) {
2944 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2945 	}
2946 	for (j = frag_strt; j <= frag_end; j++) {
2947 		theTSN = j + last_tsn;
2948 		while (tp1) {
2949 			if (tp1->rec.data.doing_fast_retransmit)
2950 				(*num_frs) += 1;
2951 
2952 			/*-
2953 			 * CMT: CUCv2 algorithm. For each TSN being
2954 			 * processed from the sent queue, track the
2955 			 * next expected pseudo-cumack, or
2956 			 * rtx_pseudo_cumack, if required. Separate
2957 			 * cumack trackers for first transmissions,
2958 			 * and retransmissions.
2959 			 */
2960 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2961 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2962 			    (tp1->snd_count == 1)) {
2963 				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2964 				tp1->whoTo->find_pseudo_cumack = 0;
2965 			}
2966 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2967 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2968 			    (tp1->snd_count > 1)) {
2969 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2970 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2971 			}
2972 			if (tp1->rec.data.tsn == theTSN) {
2973 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2974 					/*-
2975 					 * must be held until
2976 					 * cum-ack passes
2977 					 */
2978 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2979 						/*-
2980 						 * If it is less than RESEND, it is
2981 						 * now no-longer in flight.
2982 						 * Higher values may already be set
2983 						 * via previous Gap Ack Blocks...
2984 						 * i.e. ACKED or RESEND.
2985 						 */
2986 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
2987 						    *biggest_newly_acked_tsn)) {
2988 							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
2989 						}
2990 						/*-
2991 						 * CMT: SFR algo (and HTNA) - set
2992 						 * saw_newack to 1 for dest being
2993 						 * newly acked. update
2994 						 * this_sack_highest_newack if
2995 						 * appropriate.
2996 						 */
2997 						if (tp1->rec.data.chunk_was_revoked == 0)
2998 							tp1->whoTo->saw_newack = 1;
2999 
3000 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3001 						    tp1->whoTo->this_sack_highest_newack)) {
3002 							tp1->whoTo->this_sack_highest_newack =
3003 							    tp1->rec.data.tsn;
3004 						}
3005 						/*-
3006 						 * CMT DAC algo: also update
3007 						 * this_sack_lowest_newack
3008 						 */
3009 						if (*this_sack_lowest_newack == 0) {
3010 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3011 								sctp_log_sack(*this_sack_lowest_newack,
3012 								    last_tsn,
3013 								    tp1->rec.data.tsn,
3014 								    0,
3015 								    0,
3016 								    SCTP_LOG_TSN_ACKED);
3017 							}
3018 							*this_sack_lowest_newack = tp1->rec.data.tsn;
3019 						}
3020 						/*-
3021 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3022 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3023 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3024 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3025 						 * Separate pseudo_cumack trackers for first transmissions and
3026 						 * retransmissions.
3027 						 */
3028 						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3029 							if (tp1->rec.data.chunk_was_revoked == 0) {
3030 								tp1->whoTo->new_pseudo_cumack = 1;
3031 							}
3032 							tp1->whoTo->find_pseudo_cumack = 1;
3033 						}
3034 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3035 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3036 						}
3037 						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3038 							if (tp1->rec.data.chunk_was_revoked == 0) {
3039 								tp1->whoTo->new_pseudo_cumack = 1;
3040 							}
3041 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
3042 						}
3043 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3044 							sctp_log_sack(*biggest_newly_acked_tsn,
3045 							    last_tsn,
3046 							    tp1->rec.data.tsn,
3047 							    frag_strt,
3048 							    frag_end,
3049 							    SCTP_LOG_TSN_ACKED);
3050 						}
3051 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3052 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3053 							    tp1->whoTo->flight_size,
3054 							    tp1->book_size,
3055 							    (uint32_t)(uintptr_t)tp1->whoTo,
3056 							    tp1->rec.data.tsn);
3057 						}
3058 						sctp_flight_size_decrease(tp1);
3059 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3060 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3061 							    tp1);
3062 						}
3063 						sctp_total_flight_decrease(stcb, tp1);
3064 
3065 						tp1->whoTo->net_ack += tp1->send_size;
3066 						if (tp1->snd_count < 2) {
3067 							/*-
3068 							 * True non-retransmitted chunk
3069 							 */
3070 							tp1->whoTo->net_ack2 += tp1->send_size;
3071 
3072 							/*-
3073 							 * update RTO too ?
3074 							 */
3075 							if (tp1->do_rtt) {
3076 								if (*rto_ok &&
3077 								    sctp_calculate_rto(stcb,
3078 								    &stcb->asoc,
3079 								    tp1->whoTo,
3080 								    &tp1->sent_rcv_time,
3081 								    SCTP_RTT_FROM_DATA)) {
3082 									*rto_ok = 0;
3083 								}
3084 								if (tp1->whoTo->rto_needed == 0) {
3085 									tp1->whoTo->rto_needed = 1;
3086 								}
3087 								tp1->do_rtt = 0;
3088 							}
3089 						}
3090 					}
3091 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3092 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3093 						    stcb->asoc.this_sack_highest_gap)) {
3094 							stcb->asoc.this_sack_highest_gap =
3095 							    tp1->rec.data.tsn;
3096 						}
3097 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3098 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3099 #ifdef SCTP_AUDITING_ENABLED
3100 							sctp_audit_log(0xB2,
3101 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3102 #endif
3103 						}
3104 					}
3105 					/*-
3106 					 * All chunks NOT UNSENT fall through here and are marked
3107 					 * (leave PR-SCTP ones that are to skip alone though)
3108 					 */
3109 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3110 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3111 						tp1->sent = SCTP_DATAGRAM_MARKED;
3112 					}
3113 					if (tp1->rec.data.chunk_was_revoked) {
3114 						/* deflate the cwnd */
3115 						tp1->whoTo->cwnd -= tp1->book_size;
3116 						tp1->rec.data.chunk_was_revoked = 0;
3117 					}
3118 					/* NR Sack code here */
3119 					if (nr_sacking &&
3120 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3121 						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3122 							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3123 #ifdef INVARIANTS
3124 						} else {
3125 							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3126 #endif
3127 						}
3128 						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3129 						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3130 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3131 							stcb->asoc.trigger_reset = 1;
3132 						}
3133 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3134 						if (tp1->data) {
3135 							/*
3136 							 * sa_ignore
3137 							 * NO_NULL_CHK
3138 							 */
3139 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3140 							sctp_m_freem(tp1->data);
3141 							tp1->data = NULL;
3142 						}
3143 						wake_him++;
3144 					}
3145 				}
3146 				break;
3147 			}	/* if (tp1->tsn == theTSN) */
3148 			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3149 				break;
3150 			}
3151 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3152 			if ((tp1 == NULL) && (circled == 0)) {
3153 				circled++;
3154 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3155 			}
3156 		}		/* end while (tp1) */
3157 		if (tp1 == NULL) {
3158 			circled = 0;
3159 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3160 		}
3161 		/* In case the fragments were not in order we must reset */
3162 	}			/* end for (j = fragStart */
3163 	*p_tp1 = tp1;
3164 	return (wake_him);	/* Return value only used for nr-sack */
3165 }
3166 
3167 static int
sctp_handle_segments(struct mbuf * m,int * offset,struct sctp_tcb * stcb,struct sctp_association * asoc,uint32_t last_tsn,uint32_t * biggest_tsn_acked,uint32_t * biggest_newly_acked_tsn,uint32_t * this_sack_lowest_newack,int num_seg,int num_nr_seg,int * rto_ok)3168 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3169     uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3170     uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3171     int num_seg, int num_nr_seg, int *rto_ok)
3172 {
3173 	struct sctp_gap_ack_block *frag, block;
3174 	struct sctp_tmit_chunk *tp1;
3175 	int i;
3176 	int num_frs = 0;
3177 	int chunk_freed;
3178 	int non_revocable;
3179 	uint16_t frag_strt, frag_end, prev_frag_end;
3180 
3181 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3182 	prev_frag_end = 0;
3183 	chunk_freed = 0;
3184 
3185 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3186 		if (i == num_seg) {
3187 			prev_frag_end = 0;
3188 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3189 		}
3190 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3191 		    sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3192 		*offset += sizeof(block);
3193 		if (frag == NULL) {
3194 			return (chunk_freed);
3195 		}
3196 		frag_strt = ntohs(frag->start);
3197 		frag_end = ntohs(frag->end);
3198 
3199 		if (frag_strt > frag_end) {
3200 			/* This gap report is malformed, skip it. */
3201 			continue;
3202 		}
3203 		if (frag_strt <= prev_frag_end) {
3204 			/* This gap report is not in order, so restart. */
3205 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3206 		}
3207 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3208 			*biggest_tsn_acked = last_tsn + frag_end;
3209 		}
3210 		if (i < num_seg) {
3211 			non_revocable = 0;
3212 		} else {
3213 			non_revocable = 1;
3214 		}
3215 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3216 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
3217 		    this_sack_lowest_newack, rto_ok)) {
3218 			chunk_freed = 1;
3219 		}
3220 		prev_frag_end = frag_end;
3221 	}
3222 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3223 		if (num_frs)
3224 			sctp_log_fr(*biggest_tsn_acked,
3225 			    *biggest_newly_acked_tsn,
3226 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3227 	}
3228 	return (chunk_freed);
3229 }
3230 
3231 static void
sctp_check_for_revoked(struct sctp_tcb * stcb,struct sctp_association * asoc,uint32_t cumack,uint32_t biggest_tsn_acked)3232 sctp_check_for_revoked(struct sctp_tcb *stcb,
3233     struct sctp_association *asoc, uint32_t cumack,
3234     uint32_t biggest_tsn_acked)
3235 {
3236 	struct sctp_tmit_chunk *tp1;
3237 
3238 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3239 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3240 			/*
3241 			 * ok this guy is either ACK or MARKED. If it is
3242 			 * ACKED it has been previously acked but not this
3243 			 * time i.e. revoked.  If it is MARKED it was ACK'ed
3244 			 * again.
3245 			 */
3246 			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3247 				break;
3248 			}
3249 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3250 				/* it has been revoked */
3251 				tp1->sent = SCTP_DATAGRAM_SENT;
3252 				tp1->rec.data.chunk_was_revoked = 1;
3253 				/*
3254 				 * We must add this stuff back in to assure
3255 				 * timers and such get started.
3256 				 */
3257 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3258 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3259 					    tp1->whoTo->flight_size,
3260 					    tp1->book_size,
3261 					    (uint32_t)(uintptr_t)tp1->whoTo,
3262 					    tp1->rec.data.tsn);
3263 				}
3264 				sctp_flight_size_increase(tp1);
3265 				sctp_total_flight_increase(stcb, tp1);
3266 				/*
3267 				 * We inflate the cwnd to compensate for our
3268 				 * artificial inflation of the flight_size.
3269 				 */
3270 				tp1->whoTo->cwnd += tp1->book_size;
3271 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3272 					sctp_log_sack(asoc->last_acked_seq,
3273 					    cumack,
3274 					    tp1->rec.data.tsn,
3275 					    0,
3276 					    0,
3277 					    SCTP_LOG_TSN_REVOKED);
3278 				}
3279 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3280 				/* it has been re-acked in this SACK */
3281 				tp1->sent = SCTP_DATAGRAM_ACKED;
3282 			}
3283 		}
3284 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3285 			break;
3286 	}
3287 }
3288 
3289 static void
sctp_strike_gap_ack_chunks(struct sctp_tcb * stcb,struct sctp_association * asoc,uint32_t biggest_tsn_acked,uint32_t biggest_tsn_newly_acked,uint32_t this_sack_lowest_newack,int accum_moved)3290 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3291     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3292 {
3293 	struct sctp_tmit_chunk *tp1;
3294 	int strike_flag = 0;
3295 	struct timeval now;
3296 	uint32_t sending_seq;
3297 	struct sctp_nets *net;
3298 	int num_dests_sacked = 0;
3299 
3300 	/*
3301 	 * select the sending_seq, this is either the next thing ready to be
3302 	 * sent but not transmitted, OR, the next seq we assign.
3303 	 */
3304 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3305 	if (tp1 == NULL) {
3306 		sending_seq = asoc->sending_seq;
3307 	} else {
3308 		sending_seq = tp1->rec.data.tsn;
3309 	}
3310 
3311 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3312 	if ((asoc->sctp_cmt_on_off > 0) &&
3313 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3314 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3315 			if (net->saw_newack)
3316 				num_dests_sacked++;
3317 		}
3318 	}
3319 	if (stcb->asoc.prsctp_supported) {
3320 		(void)SCTP_GETTIME_TIMEVAL(&now);
3321 	}
3322 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3323 		strike_flag = 0;
3324 		if (tp1->no_fr_allowed) {
3325 			/* this one had a timeout or something */
3326 			continue;
3327 		}
3328 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3329 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3330 				sctp_log_fr(biggest_tsn_newly_acked,
3331 				    tp1->rec.data.tsn,
3332 				    tp1->sent,
3333 				    SCTP_FR_LOG_CHECK_STRIKE);
3334 		}
3335 		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3336 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3337 			/* done */
3338 			break;
3339 		}
3340 		if (stcb->asoc.prsctp_supported) {
3341 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3342 				/* Is it expired? */
3343 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3344 					/* Yes so drop it */
3345 					if (tp1->data != NULL) {
3346 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3347 						    SCTP_SO_NOT_LOCKED);
3348 					}
3349 					continue;
3350 				}
3351 			}
3352 		}
3353 		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3354 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3355 			/* we are beyond the tsn in the sack  */
3356 			break;
3357 		}
3358 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3359 			/* either a RESEND, ACKED, or MARKED */
3360 			/* skip */
3361 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3362 				/* Continue strikin FWD-TSN chunks */
3363 				tp1->rec.data.fwd_tsn_cnt++;
3364 			}
3365 			continue;
3366 		}
3367 		/*
3368 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3369 		 */
3370 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3371 			/*
3372 			 * No new acks were received for data sent to this
3373 			 * dest. Therefore, according to the SFR algo for
3374 			 * CMT, no data sent to this dest can be marked for
3375 			 * FR using this SACK.
3376 			 */
3377 			continue;
3378 		} else if (tp1->whoTo &&
3379 			    SCTP_TSN_GT(tp1->rec.data.tsn,
3380 			    tp1->whoTo->this_sack_highest_newack) &&
3381 		    !(accum_moved && asoc->fast_retran_loss_recovery)) {
3382 			/*
3383 			 * CMT: New acks were received for data sent to this
3384 			 * dest. But no new acks were seen for data sent
3385 			 * after tp1. Therefore, according to the SFR algo
3386 			 * for CMT, tp1 cannot be marked for FR using this
3387 			 * SACK. This step covers part of the DAC algo and
3388 			 * the HTNA algo as well.
3389 			 */
3390 			continue;
3391 		}
3392 		/*
3393 		 * Here we check to see if we were have already done a FR
3394 		 * and if so we see if the biggest TSN we saw in the sack is
3395 		 * smaller than the recovery point. If so we don't strike
3396 		 * the tsn... otherwise we CAN strike the TSN.
3397 		 */
3398 		/*
3399 		 * @@@ JRI: Check for CMT if (accum_moved &&
3400 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3401 		 * 0)) {
3402 		 */
3403 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3404 			/*
3405 			 * Strike the TSN if in fast-recovery and cum-ack
3406 			 * moved.
3407 			 */
3408 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3409 				sctp_log_fr(biggest_tsn_newly_acked,
3410 				    tp1->rec.data.tsn,
3411 				    tp1->sent,
3412 				    SCTP_FR_LOG_STRIKE_CHUNK);
3413 			}
3414 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3415 				tp1->sent++;
3416 			}
3417 			if ((asoc->sctp_cmt_on_off > 0) &&
3418 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3419 				/*
3420 				 * CMT DAC algorithm: If SACK flag is set to
3421 				 * 0, then lowest_newack test will not pass
3422 				 * because it would have been set to the
3423 				 * cumack earlier. If not already to be
3424 				 * rtx'd, If not a mixed sack and if tp1 is
3425 				 * not between two sacked TSNs, then mark by
3426 				 * one more. NOTE that we are marking by one
3427 				 * additional time since the SACK DAC flag
3428 				 * indicates that two packets have been
3429 				 * received after this missing TSN.
3430 				 */
3431 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3432 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3433 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3434 						sctp_log_fr(16 + num_dests_sacked,
3435 						    tp1->rec.data.tsn,
3436 						    tp1->sent,
3437 						    SCTP_FR_LOG_STRIKE_CHUNK);
3438 					}
3439 					tp1->sent++;
3440 				}
3441 			}
3442 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3443 		    (asoc->sctp_cmt_on_off == 0)) {
3444 			/*
3445 			 * For those that have done a FR we must take
3446 			 * special consideration if we strike. I.e the
3447 			 * biggest_newly_acked must be higher than the
3448 			 * sending_seq at the time we did the FR.
3449 			 */
3450 			if (
3451 #ifdef SCTP_FR_TO_ALTERNATE
3452 			/*
3453 			 * If FR's go to new networks, then we must only do
3454 			 * this for singly homed asoc's. However if the FR's
3455 			 * go to the same network (Armando's work) then its
3456 			 * ok to FR multiple times.
3457 			 */
3458 			    (asoc->numnets < 2)
3459 #else
3460 			    (1)
3461 #endif
3462 			    ) {
3463 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3464 				    tp1->rec.data.fast_retran_tsn)) {
3465 					/*
3466 					 * Strike the TSN, since this ack is
3467 					 * beyond where things were when we
3468 					 * did a FR.
3469 					 */
3470 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3471 						sctp_log_fr(biggest_tsn_newly_acked,
3472 						    tp1->rec.data.tsn,
3473 						    tp1->sent,
3474 						    SCTP_FR_LOG_STRIKE_CHUNK);
3475 					}
3476 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3477 						tp1->sent++;
3478 					}
3479 					strike_flag = 1;
3480 					if ((asoc->sctp_cmt_on_off > 0) &&
3481 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3482 						/*
3483 						 * CMT DAC algorithm: If
3484 						 * SACK flag is set to 0,
3485 						 * then lowest_newack test
3486 						 * will not pass because it
3487 						 * would have been set to
3488 						 * the cumack earlier. If
3489 						 * not already to be rtx'd,
3490 						 * If not a mixed sack and
3491 						 * if tp1 is not between two
3492 						 * sacked TSNs, then mark by
3493 						 * one more. NOTE that we
3494 						 * are marking by one
3495 						 * additional time since the
3496 						 * SACK DAC flag indicates
3497 						 * that two packets have
3498 						 * been received after this
3499 						 * missing TSN.
3500 						 */
3501 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3502 						    (num_dests_sacked == 1) &&
3503 						    SCTP_TSN_GT(this_sack_lowest_newack,
3504 						    tp1->rec.data.tsn)) {
3505 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3506 								sctp_log_fr(32 + num_dests_sacked,
3507 								    tp1->rec.data.tsn,
3508 								    tp1->sent,
3509 								    SCTP_FR_LOG_STRIKE_CHUNK);
3510 							}
3511 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3512 								tp1->sent++;
3513 							}
3514 						}
3515 					}
3516 				}
3517 			}
3518 			/*
3519 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3520 			 * algo covers HTNA.
3521 			 */
3522 		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3523 		    biggest_tsn_newly_acked)) {
3524 			/*
3525 			 * We don't strike these: This is the  HTNA
3526 			 * algorithm i.e. we don't strike If our TSN is
3527 			 * larger than the Highest TSN Newly Acked.
3528 			 */
3529 			;
3530 		} else {
3531 			/* Strike the TSN */
3532 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3533 				sctp_log_fr(biggest_tsn_newly_acked,
3534 				    tp1->rec.data.tsn,
3535 				    tp1->sent,
3536 				    SCTP_FR_LOG_STRIKE_CHUNK);
3537 			}
3538 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3539 				tp1->sent++;
3540 			}
3541 			if ((asoc->sctp_cmt_on_off > 0) &&
3542 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3543 				/*
3544 				 * CMT DAC algorithm: If SACK flag is set to
3545 				 * 0, then lowest_newack test will not pass
3546 				 * because it would have been set to the
3547 				 * cumack earlier. If not already to be
3548 				 * rtx'd, If not a mixed sack and if tp1 is
3549 				 * not between two sacked TSNs, then mark by
3550 				 * one more. NOTE that we are marking by one
3551 				 * additional time since the SACK DAC flag
3552 				 * indicates that two packets have been
3553 				 * received after this missing TSN.
3554 				 */
3555 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3556 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3557 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3558 						sctp_log_fr(48 + num_dests_sacked,
3559 						    tp1->rec.data.tsn,
3560 						    tp1->sent,
3561 						    SCTP_FR_LOG_STRIKE_CHUNK);
3562 					}
3563 					tp1->sent++;
3564 				}
3565 			}
3566 		}
3567 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3568 			struct sctp_nets *alt;
3569 
3570 			/* fix counts and things */
3571 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3572 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3573 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3574 				    tp1->book_size,
3575 				    (uint32_t)(uintptr_t)tp1->whoTo,
3576 				    tp1->rec.data.tsn);
3577 			}
3578 			if (tp1->whoTo) {
3579 				tp1->whoTo->net_ack++;
3580 				sctp_flight_size_decrease(tp1);
3581 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3582 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3583 					    tp1);
3584 				}
3585 			}
3586 
3587 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3588 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3589 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3590 			}
3591 			/* add back to the rwnd */
3592 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3593 
3594 			/* remove from the total flight */
3595 			sctp_total_flight_decrease(stcb, tp1);
3596 
3597 			if ((stcb->asoc.prsctp_supported) &&
3598 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3599 				/*
3600 				 * Has it been retransmitted tv_sec times? -
3601 				 * we store the retran count there.
3602 				 */
3603 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3604 					/* Yes, so drop it */
3605 					if (tp1->data != NULL) {
3606 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3607 						    SCTP_SO_NOT_LOCKED);
3608 					}
3609 					/* Make sure to flag we had a FR */
3610 					if (tp1->whoTo != NULL) {
3611 						tp1->whoTo->net_ack++;
3612 					}
3613 					continue;
3614 				}
3615 			}
3616 			/*
3617 			 * SCTP_PRINTF("OK, we are now ready to FR this
3618 			 * guy\n");
3619 			 */
3620 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3621 				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3622 				    0, SCTP_FR_MARKED);
3623 			}
3624 			if (strike_flag) {
3625 				/* This is a subsequent FR */
3626 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3627 			}
3628 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3629 			if (asoc->sctp_cmt_on_off > 0) {
3630 				/*
3631 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3632 				 * If CMT is being used, then pick dest with
3633 				 * largest ssthresh for any retransmission.
3634 				 */
3635 				tp1->no_fr_allowed = 1;
3636 				alt = tp1->whoTo;
3637 				/* sa_ignore NO_NULL_CHK */
3638 				if (asoc->sctp_cmt_pf > 0) {
3639 					/*
3640 					 * JRS 5/18/07 - If CMT PF is on,
3641 					 * use the PF version of
3642 					 * find_alt_net()
3643 					 */
3644 					alt = sctp_find_alternate_net(stcb, alt, 2);
3645 				} else {
3646 					/*
3647 					 * JRS 5/18/07 - If only CMT is on,
3648 					 * use the CMT version of
3649 					 * find_alt_net()
3650 					 */
3651 					/* sa_ignore NO_NULL_CHK */
3652 					alt = sctp_find_alternate_net(stcb, alt, 1);
3653 				}
3654 				if (alt == NULL) {
3655 					alt = tp1->whoTo;
3656 				}
3657 				/*
3658 				 * CUCv2: If a different dest is picked for
3659 				 * the retransmission, then new
3660 				 * (rtx-)pseudo_cumack needs to be tracked
3661 				 * for orig dest. Let CUCv2 track new (rtx-)
3662 				 * pseudo-cumack always.
3663 				 */
3664 				if (tp1->whoTo) {
3665 					tp1->whoTo->find_pseudo_cumack = 1;
3666 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3667 				}
3668 			} else {	/* CMT is OFF */
3669 #ifdef SCTP_FR_TO_ALTERNATE
3670 				/* Can we find an alternate? */
3671 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3672 #else
3673 				/*
3674 				 * default behavior is to NOT retransmit
3675 				 * FR's to an alternate. Armando Caro's
3676 				 * paper details why.
3677 				 */
3678 				alt = tp1->whoTo;
3679 #endif
3680 			}
3681 
3682 			tp1->rec.data.doing_fast_retransmit = 1;
3683 			/* mark the sending seq for possible subsequent FR's */
3684 			/*
3685 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3686 			 * (uint32_t)tpi->rec.data.tsn);
3687 			 */
3688 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3689 				/*
3690 				 * If the queue of send is empty then its
3691 				 * the next sequence number that will be
3692 				 * assigned so we subtract one from this to
3693 				 * get the one we last sent.
3694 				 */
3695 				tp1->rec.data.fast_retran_tsn = sending_seq;
3696 			} else {
3697 				/*
3698 				 * If there are chunks on the send queue
3699 				 * (unsent data that has made it from the
3700 				 * stream queues but not out the door, we
3701 				 * take the first one (which will have the
3702 				 * lowest TSN) and subtract one to get the
3703 				 * one we last sent.
3704 				 */
3705 				struct sctp_tmit_chunk *ttt;
3706 
3707 				ttt = TAILQ_FIRST(&asoc->send_queue);
3708 				tp1->rec.data.fast_retran_tsn =
3709 				    ttt->rec.data.tsn;
3710 			}
3711 
3712 			if (tp1->do_rtt) {
3713 				/*
3714 				 * this guy had a RTO calculation pending on
3715 				 * it, cancel it
3716 				 */
3717 				if ((tp1->whoTo != NULL) &&
3718 				    (tp1->whoTo->rto_needed == 0)) {
3719 					tp1->whoTo->rto_needed = 1;
3720 				}
3721 				tp1->do_rtt = 0;
3722 			}
3723 			if (alt != tp1->whoTo) {
3724 				/* yes, there is an alternate. */
3725 				sctp_free_remote_addr(tp1->whoTo);
3726 				/* sa_ignore FREED_MEMORY */
3727 				tp1->whoTo = alt;
3728 				atomic_add_int(&alt->ref_count, 1);
3729 			}
3730 		}
3731 	}
3732 }
3733 
3734 struct sctp_tmit_chunk *
sctp_try_advance_peer_ack_point(struct sctp_tcb * stcb,struct sctp_association * asoc)3735 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3736     struct sctp_association *asoc)
3737 {
3738 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3739 	struct timeval now;
3740 	int now_filled = 0;
3741 
3742 	if (asoc->prsctp_supported == 0) {
3743 		return (NULL);
3744 	}
3745 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3746 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3747 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3748 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3749 			/* no chance to advance, out of here */
3750 			break;
3751 		}
3752 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3753 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3754 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3755 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3756 				    asoc->advanced_peer_ack_point,
3757 				    tp1->rec.data.tsn, 0, 0);
3758 			}
3759 		}
3760 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3761 			/*
3762 			 * We can't fwd-tsn past any that are reliable aka
3763 			 * retransmitted until the asoc fails.
3764 			 */
3765 			break;
3766 		}
3767 		if (!now_filled) {
3768 			(void)SCTP_GETTIME_TIMEVAL(&now);
3769 			now_filled = 1;
3770 		}
3771 		/*
3772 		 * now we got a chunk which is marked for another
3773 		 * retransmission to a PR-stream but has run out its chances
3774 		 * already maybe OR has been marked to skip now. Can we skip
3775 		 * it if its a resend?
3776 		 */
3777 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3778 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3779 			/*
3780 			 * Now is this one marked for resend and its time is
3781 			 * now up?
3782 			 */
3783 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3784 				/* Yes so drop it */
3785 				if (tp1->data) {
3786 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3787 					    1, SCTP_SO_NOT_LOCKED);
3788 				}
3789 			} else {
3790 				/*
3791 				 * No, we are done when hit one for resend
3792 				 * whos time as not expired.
3793 				 */
3794 				break;
3795 			}
3796 		}
3797 		/*
3798 		 * Ok now if this chunk is marked to drop it we can clean up
3799 		 * the chunk, advance our peer ack point and we can check
3800 		 * the next chunk.
3801 		 */
3802 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3803 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3804 			/* advance PeerAckPoint goes forward */
3805 			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3806 				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3807 				a_adv = tp1;
3808 			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3809 				/* No update but we do save the chk */
3810 				a_adv = tp1;
3811 			}
3812 		} else {
3813 			/*
3814 			 * If it is still in RESEND we can advance no
3815 			 * further
3816 			 */
3817 			break;
3818 		}
3819 	}
3820 	return (a_adv);
3821 }
3822 
3823 static int
sctp_fs_audit(struct sctp_association * asoc)3824 sctp_fs_audit(struct sctp_association *asoc)
3825 {
3826 	struct sctp_tmit_chunk *chk;
3827 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3828 	int ret;
3829 #ifndef INVARIANTS
3830 	int entry_flight, entry_cnt;
3831 #endif
3832 
3833 	ret = 0;
3834 #ifndef INVARIANTS
3835 	entry_flight = asoc->total_flight;
3836 	entry_cnt = asoc->total_flight_count;
3837 #endif
3838 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3839 		return (0);
3840 
3841 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3842 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3843 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3844 			    chk->rec.data.tsn,
3845 			    chk->send_size,
3846 			    chk->snd_count);
3847 			inflight++;
3848 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3849 			resend++;
3850 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3851 			inbetween++;
3852 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3853 			above++;
3854 		} else {
3855 			acked++;
3856 		}
3857 	}
3858 
3859 	if ((inflight > 0) || (inbetween > 0)) {
3860 #ifdef INVARIANTS
3861 		panic("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d",
3862 		    inflight, inbetween, resend, above, acked);
3863 #else
3864 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3865 		    entry_flight, entry_cnt);
3866 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3867 		    inflight, inbetween, resend, above, acked);
3868 		ret = 1;
3869 #endif
3870 	}
3871 	return (ret);
3872 }
3873 
3874 static void
sctp_window_probe_recovery(struct sctp_tcb * stcb,struct sctp_association * asoc,struct sctp_tmit_chunk * tp1)3875 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3876     struct sctp_association *asoc,
3877     struct sctp_tmit_chunk *tp1)
3878 {
3879 	tp1->window_probe = 0;
3880 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3881 		/* TSN's skipped we do NOT move back. */
3882 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3883 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3884 		    tp1->book_size,
3885 		    (uint32_t)(uintptr_t)tp1->whoTo,
3886 		    tp1->rec.data.tsn);
3887 		return;
3888 	}
3889 	/* First setup this by shrinking flight */
3890 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3891 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3892 		    tp1);
3893 	}
3894 	sctp_flight_size_decrease(tp1);
3895 	sctp_total_flight_decrease(stcb, tp1);
3896 	/* Now mark for resend */
3897 	tp1->sent = SCTP_DATAGRAM_RESEND;
3898 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3899 
3900 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3901 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3902 		    tp1->whoTo->flight_size,
3903 		    tp1->book_size,
3904 		    (uint32_t)(uintptr_t)tp1->whoTo,
3905 		    tp1->rec.data.tsn);
3906 	}
3907 }
3908 
3909 void
sctp_express_handle_sack(struct sctp_tcb * stcb,uint32_t cumack,uint32_t rwnd,int * abort_now,int ecne_seen)3910 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3911     uint32_t rwnd, int *abort_now, int ecne_seen)
3912 {
3913 	struct sctp_nets *net;
3914 	struct sctp_association *asoc;
3915 	struct sctp_tmit_chunk *tp1, *tp2;
3916 	uint32_t old_rwnd;
3917 	int win_probe_recovery = 0;
3918 	int win_probe_recovered = 0;
3919 	int j, done_once = 0;
3920 	int rto_ok = 1;
3921 	uint32_t send_s;
3922 
3923 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3924 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3925 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3926 	}
3927 	SCTP_TCB_LOCK_ASSERT(stcb);
3928 #ifdef SCTP_ASOCLOG_OF_TSNS
3929 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3930 	stcb->asoc.cumack_log_at++;
3931 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3932 		stcb->asoc.cumack_log_at = 0;
3933 	}
3934 #endif
3935 	asoc = &stcb->asoc;
3936 	old_rwnd = asoc->peers_rwnd;
3937 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3938 		/* old ack */
3939 		return;
3940 	} else if (asoc->last_acked_seq == cumack) {
3941 		/* Window update sack */
3942 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3943 		    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3944 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3945 			/* SWS sender side engages */
3946 			asoc->peers_rwnd = 0;
3947 		}
3948 		if (asoc->peers_rwnd > old_rwnd) {
3949 			goto again;
3950 		}
3951 		return;
3952 	}
3953 
3954 	/* First setup for CC stuff */
3955 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3956 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3957 			/* Drag along the window_tsn for cwr's */
3958 			net->cwr_window_tsn = cumack;
3959 		}
3960 		net->prev_cwnd = net->cwnd;
3961 		net->net_ack = 0;
3962 		net->net_ack2 = 0;
3963 
3964 		/*
3965 		 * CMT: Reset CUC and Fast recovery algo variables before
3966 		 * SACK processing
3967 		 */
3968 		net->new_pseudo_cumack = 0;
3969 		net->will_exit_fast_recovery = 0;
3970 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3971 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3972 		}
3973 	}
3974 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3975 		tp1 = TAILQ_LAST(&asoc->sent_queue,
3976 		    sctpchunk_listhead);
3977 		send_s = tp1->rec.data.tsn + 1;
3978 	} else {
3979 		send_s = asoc->sending_seq;
3980 	}
3981 	if (SCTP_TSN_GE(cumack, send_s)) {
3982 		struct mbuf *op_err;
3983 		char msg[SCTP_DIAG_INFO_LEN];
3984 
3985 		*abort_now = 1;
3986 		/* XXX */
3987 		SCTP_SNPRINTF(msg, sizeof(msg),
3988 		    "Cum ack %8.8x greater or equal than TSN %8.8x",
3989 		    cumack, send_s);
3990 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3991 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3992 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
3993 		return;
3994 	}
3995 	asoc->this_sack_highest_gap = cumack;
3996 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3997 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3998 		    stcb->asoc.overall_error_count,
3999 		    0,
4000 		    SCTP_FROM_SCTP_INDATA,
4001 		    __LINE__);
4002 	}
4003 	stcb->asoc.overall_error_count = 0;
4004 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4005 		/* process the new consecutive TSN first */
4006 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4007 			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4008 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4009 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
4010 				}
4011 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4012 					/*
4013 					 * If it is less than ACKED, it is
4014 					 * now no-longer in flight. Higher
4015 					 * values may occur during marking
4016 					 */
4017 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4018 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4019 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4020 							    tp1->whoTo->flight_size,
4021 							    tp1->book_size,
4022 							    (uint32_t)(uintptr_t)tp1->whoTo,
4023 							    tp1->rec.data.tsn);
4024 						}
4025 						sctp_flight_size_decrease(tp1);
4026 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4027 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4028 							    tp1);
4029 						}
4030 						/* sa_ignore NO_NULL_CHK */
4031 						sctp_total_flight_decrease(stcb, tp1);
4032 					}
4033 					tp1->whoTo->net_ack += tp1->send_size;
4034 					if (tp1->snd_count < 2) {
4035 						/*
4036 						 * True non-retransmitted
4037 						 * chunk
4038 						 */
4039 						tp1->whoTo->net_ack2 +=
4040 						    tp1->send_size;
4041 
4042 						/* update RTO too? */
4043 						if (tp1->do_rtt) {
4044 							if (rto_ok &&
4045 							    sctp_calculate_rto(stcb,
4046 							    &stcb->asoc,
4047 							    tp1->whoTo,
4048 							    &tp1->sent_rcv_time,
4049 							    SCTP_RTT_FROM_DATA)) {
4050 								rto_ok = 0;
4051 							}
4052 							if (tp1->whoTo->rto_needed == 0) {
4053 								tp1->whoTo->rto_needed = 1;
4054 							}
4055 							tp1->do_rtt = 0;
4056 						}
4057 					}
4058 					/*
4059 					 * CMT: CUCv2 algorithm. From the
4060 					 * cumack'd TSNs, for each TSN being
4061 					 * acked for the first time, set the
4062 					 * following variables for the
4063 					 * corresp destination.
4064 					 * new_pseudo_cumack will trigger a
4065 					 * cwnd update.
4066 					 * find_(rtx_)pseudo_cumack will
4067 					 * trigger search for the next
4068 					 * expected (rtx-)pseudo-cumack.
4069 					 */
4070 					tp1->whoTo->new_pseudo_cumack = 1;
4071 					tp1->whoTo->find_pseudo_cumack = 1;
4072 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4073 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4074 						/* sa_ignore NO_NULL_CHK */
4075 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4076 					}
4077 				}
4078 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4079 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4080 				}
4081 				if (tp1->rec.data.chunk_was_revoked) {
4082 					/* deflate the cwnd */
4083 					tp1->whoTo->cwnd -= tp1->book_size;
4084 					tp1->rec.data.chunk_was_revoked = 0;
4085 				}
4086 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4087 					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4088 						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4089 #ifdef INVARIANTS
4090 					} else {
4091 						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4092 #endif
4093 					}
4094 				}
4095 				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4096 				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4097 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4098 					asoc->trigger_reset = 1;
4099 				}
4100 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4101 				if (tp1->data) {
4102 					/* sa_ignore NO_NULL_CHK */
4103 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4104 					sctp_m_freem(tp1->data);
4105 					tp1->data = NULL;
4106 				}
4107 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4108 					sctp_log_sack(asoc->last_acked_seq,
4109 					    cumack,
4110 					    tp1->rec.data.tsn,
4111 					    0,
4112 					    0,
4113 					    SCTP_LOG_FREE_SENT);
4114 				}
4115 				asoc->sent_queue_cnt--;
4116 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4117 			} else {
4118 				break;
4119 			}
4120 		}
4121 	}
4122 	/* sa_ignore NO_NULL_CHK */
4123 	if (stcb->sctp_socket) {
4124 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4125 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4126 			/* sa_ignore NO_NULL_CHK */
4127 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4128 		}
4129 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4130 	} else {
4131 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4132 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4133 		}
4134 	}
4135 
4136 	/* JRS - Use the congestion control given in the CC module */
4137 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4138 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4139 			if (net->net_ack2 > 0) {
4140 				/*
4141 				 * Karn's rule applies to clearing error
4142 				 * count, this is optional.
4143 				 */
4144 				net->error_count = 0;
4145 				if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) {
4146 					/* addr came good */
4147 					net->dest_state |= SCTP_ADDR_REACHABLE;
4148 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4149 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4150 				}
4151 				if (net == stcb->asoc.primary_destination) {
4152 					if (stcb->asoc.alternate) {
4153 						/*
4154 						 * release the alternate,
4155 						 * primary is good
4156 						 */
4157 						sctp_free_remote_addr(stcb->asoc.alternate);
4158 						stcb->asoc.alternate = NULL;
4159 					}
4160 				}
4161 				if (net->dest_state & SCTP_ADDR_PF) {
4162 					net->dest_state &= ~SCTP_ADDR_PF;
4163 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4164 					    stcb->sctp_ep, stcb, net,
4165 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4166 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4167 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4168 					/* Done with this net */
4169 					net->net_ack = 0;
4170 				}
4171 				/* restore any doubled timers */
4172 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4173 				if (net->RTO < stcb->asoc.minrto) {
4174 					net->RTO = stcb->asoc.minrto;
4175 				}
4176 				if (net->RTO > stcb->asoc.maxrto) {
4177 					net->RTO = stcb->asoc.maxrto;
4178 				}
4179 			}
4180 		}
4181 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4182 	}
4183 	asoc->last_acked_seq = cumack;
4184 
4185 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4186 		/* nothing left in-flight */
4187 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4188 			net->flight_size = 0;
4189 			net->partial_bytes_acked = 0;
4190 		}
4191 		asoc->total_flight = 0;
4192 		asoc->total_flight_count = 0;
4193 	}
4194 
4195 	/* RWND update */
4196 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4197 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4198 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4199 		/* SWS sender side engages */
4200 		asoc->peers_rwnd = 0;
4201 	}
4202 	if (asoc->peers_rwnd > old_rwnd) {
4203 		win_probe_recovery = 1;
4204 	}
4205 	/* Now assure a timer where data is queued at */
4206 again:
4207 	j = 0;
4208 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4209 		if (win_probe_recovery && (net->window_probe)) {
4210 			win_probe_recovered = 1;
4211 			/*
4212 			 * Find first chunk that was used with window probe
4213 			 * and clear the sent
4214 			 */
4215 			/* sa_ignore FREED_MEMORY */
4216 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4217 				if (tp1->window_probe) {
4218 					/* move back to data send queue */
4219 					sctp_window_probe_recovery(stcb, asoc, tp1);
4220 					break;
4221 				}
4222 			}
4223 		}
4224 		if (net->flight_size) {
4225 			j++;
4226 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4227 			if (net->window_probe) {
4228 				net->window_probe = 0;
4229 			}
4230 		} else {
4231 			if (net->window_probe) {
4232 				/*
4233 				 * In window probes we must assure a timer
4234 				 * is still running there
4235 				 */
4236 				net->window_probe = 0;
4237 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4238 					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4239 				}
4240 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4241 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4242 				    stcb, net,
4243 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4244 			}
4245 		}
4246 	}
4247 	if ((j == 0) &&
4248 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4249 	    (asoc->sent_queue_retran_cnt == 0) &&
4250 	    (win_probe_recovered == 0) &&
4251 	    (done_once == 0)) {
4252 		/*
4253 		 * huh, this should not happen unless all packets are
4254 		 * PR-SCTP and marked to skip of course.
4255 		 */
4256 		if (sctp_fs_audit(asoc)) {
4257 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4258 				net->flight_size = 0;
4259 			}
4260 			asoc->total_flight = 0;
4261 			asoc->total_flight_count = 0;
4262 			asoc->sent_queue_retran_cnt = 0;
4263 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4264 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4265 					sctp_flight_size_increase(tp1);
4266 					sctp_total_flight_increase(stcb, tp1);
4267 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4268 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4269 				}
4270 			}
4271 		}
4272 		done_once = 1;
4273 		goto again;
4274 	}
4275 	/**********************************/
4276 	/* Now what about shutdown issues */
4277 	/**********************************/
4278 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4279 		/* nothing left on sendqueue.. consider done */
4280 		/* clean up */
4281 		if ((asoc->stream_queue_cnt == 1) &&
4282 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4283 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4284 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4285 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4286 		}
4287 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4288 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4289 		    (asoc->stream_queue_cnt == 1) &&
4290 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4291 			struct mbuf *op_err;
4292 
4293 			*abort_now = 1;
4294 			/* XXX */
4295 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4296 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
4297 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
4298 			return;
4299 		}
4300 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4301 		    (asoc->stream_queue_cnt == 0)) {
4302 			struct sctp_nets *netp;
4303 
4304 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4305 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4306 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4307 			}
4308 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4309 			sctp_stop_timers_for_shutdown(stcb);
4310 			if (asoc->alternate) {
4311 				netp = asoc->alternate;
4312 			} else {
4313 				netp = asoc->primary_destination;
4314 			}
4315 			sctp_send_shutdown(stcb, netp);
4316 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4317 			    stcb->sctp_ep, stcb, netp);
4318 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4319 			    stcb->sctp_ep, stcb, NULL);
4320 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4321 		    (asoc->stream_queue_cnt == 0)) {
4322 			struct sctp_nets *netp;
4323 
4324 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4325 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4326 			sctp_stop_timers_for_shutdown(stcb);
4327 			if (asoc->alternate) {
4328 				netp = asoc->alternate;
4329 			} else {
4330 				netp = asoc->primary_destination;
4331 			}
4332 			sctp_send_shutdown_ack(stcb, netp);
4333 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4334 			    stcb->sctp_ep, stcb, netp);
4335 		}
4336 	}
4337 	/*********************************************/
4338 	/* Here we perform PR-SCTP procedures        */
4339 	/* (section 4.2)                             */
4340 	/*********************************************/
4341 	/* C1. update advancedPeerAckPoint */
4342 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4343 		asoc->advanced_peer_ack_point = cumack;
4344 	}
4345 	/* PR-Sctp issues need to be addressed too */
4346 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4347 		struct sctp_tmit_chunk *lchk;
4348 		uint32_t old_adv_peer_ack_point;
4349 
4350 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4351 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4352 		/* C3. See if we need to send a Fwd-TSN */
4353 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4354 			/*
4355 			 * ISSUE with ECN, see FWD-TSN processing.
4356 			 */
4357 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4358 				send_forward_tsn(stcb, asoc);
4359 			} else if (lchk) {
4360 				/* try to FR fwd-tsn's that get lost too */
4361 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4362 					send_forward_tsn(stcb, asoc);
4363 				}
4364 			}
4365 		}
4366 		for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
4367 			if (lchk->whoTo != NULL) {
4368 				break;
4369 			}
4370 		}
4371 		if (lchk != NULL) {
4372 			/* Assure a timer is up */
4373 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4374 			    stcb->sctp_ep, stcb, lchk->whoTo);
4375 		}
4376 	}
4377 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4378 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4379 		    rwnd,
4380 		    stcb->asoc.peers_rwnd,
4381 		    stcb->asoc.total_flight,
4382 		    stcb->asoc.total_output_queue_size);
4383 	}
4384 }
4385 
4386 void
sctp_handle_sack(struct mbuf * m,int offset_seg,int offset_dup,struct sctp_tcb * stcb,uint16_t num_seg,uint16_t num_nr_seg,uint16_t num_dup,int * abort_now,uint8_t flags,uint32_t cum_ack,uint32_t rwnd,int ecne_seen)4387 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4388     struct sctp_tcb *stcb,
4389     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4390     int *abort_now, uint8_t flags,
4391     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4392 {
4393 	struct sctp_association *asoc;
4394 	struct sctp_tmit_chunk *tp1, *tp2;
4395 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4396 	uint16_t wake_him = 0;
4397 	uint32_t send_s = 0;
4398 	long j;
4399 	int accum_moved = 0;
4400 	int will_exit_fast_recovery = 0;
4401 	uint32_t a_rwnd, old_rwnd;
4402 	int win_probe_recovery = 0;
4403 	int win_probe_recovered = 0;
4404 	struct sctp_nets *net = NULL;
4405 	int done_once;
4406 	int rto_ok = 1;
4407 	uint8_t reneged_all = 0;
4408 	uint8_t cmt_dac_flag;
4409 
4410 	/*
4411 	 * we take any chance we can to service our queues since we cannot
4412 	 * get awoken when the socket is read from :<
4413 	 */
4414 	/*
4415 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4416 	 * old sack, if so discard. 2) If there is nothing left in the send
4417 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4418 	 * too, update any rwnd change and verify no timers are running.
4419 	 * then return. 3) Process any new consecutive data i.e. cum-ack
4420 	 * moved process these first and note that it moved. 4) Process any
4421 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4422 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4423 	 * sync up flightsizes and things, stop all timers and also check
4424 	 * for shutdown_pending state. If so then go ahead and send off the
4425 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4426 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4427 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4428 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4429 	 * if in shutdown_recv state.
4430 	 */
4431 	SCTP_TCB_LOCK_ASSERT(stcb);
4432 	/* CMT DAC algo */
4433 	this_sack_lowest_newack = 0;
4434 	SCTP_STAT_INCR(sctps_slowpath_sack);
4435 	last_tsn = cum_ack;
4436 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4437 #ifdef SCTP_ASOCLOG_OF_TSNS
4438 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4439 	stcb->asoc.cumack_log_at++;
4440 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4441 		stcb->asoc.cumack_log_at = 0;
4442 	}
4443 #endif
4444 	a_rwnd = rwnd;
4445 
4446 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4447 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4448 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4449 	}
4450 
4451 	old_rwnd = stcb->asoc.peers_rwnd;
4452 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4453 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4454 		    stcb->asoc.overall_error_count,
4455 		    0,
4456 		    SCTP_FROM_SCTP_INDATA,
4457 		    __LINE__);
4458 	}
4459 	stcb->asoc.overall_error_count = 0;
4460 	asoc = &stcb->asoc;
4461 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4462 		sctp_log_sack(asoc->last_acked_seq,
4463 		    cum_ack,
4464 		    0,
4465 		    num_seg,
4466 		    num_dup,
4467 		    SCTP_LOG_NEW_SACK);
4468 	}
4469 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4470 		uint16_t i;
4471 		uint32_t *dupdata, dblock;
4472 
4473 		for (i = 0; i < num_dup; i++) {
4474 			dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4475 			    sizeof(uint32_t), (uint8_t *)&dblock);
4476 			if (dupdata == NULL) {
4477 				break;
4478 			}
4479 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4480 		}
4481 	}
4482 	/* reality check */
4483 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4484 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4485 		    sctpchunk_listhead);
4486 		send_s = tp1->rec.data.tsn + 1;
4487 	} else {
4488 		tp1 = NULL;
4489 		send_s = asoc->sending_seq;
4490 	}
4491 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4492 		struct mbuf *op_err;
4493 		char msg[SCTP_DIAG_INFO_LEN];
4494 
4495 		/*
4496 		 * no way, we have not even sent this TSN out yet. Peer is
4497 		 * hopelessly messed up with us.
4498 		 */
4499 		SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4500 		    cum_ack, send_s);
4501 		if (tp1) {
4502 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4503 			    tp1->rec.data.tsn, (void *)tp1);
4504 		}
4505 hopeless_peer:
4506 		*abort_now = 1;
4507 		/* XXX */
4508 		SCTP_SNPRINTF(msg, sizeof(msg),
4509 		    "Cum ack %8.8x greater or equal than TSN %8.8x",
4510 		    cum_ack, send_s);
4511 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4512 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_29;
4513 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
4514 		return;
4515 	}
4516 	/**********************/
4517 	/* 1) check the range */
4518 	/**********************/
4519 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4520 		/* acking something behind */
4521 		return;
4522 	}
4523 
4524 	/* update the Rwnd of the peer */
4525 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4526 	    TAILQ_EMPTY(&asoc->send_queue) &&
4527 	    (asoc->stream_queue_cnt == 0)) {
4528 		/* nothing left on send/sent and strmq */
4529 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4530 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4531 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4532 		}
4533 		asoc->peers_rwnd = a_rwnd;
4534 		if (asoc->sent_queue_retran_cnt) {
4535 			asoc->sent_queue_retran_cnt = 0;
4536 		}
4537 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4538 			/* SWS sender side engages */
4539 			asoc->peers_rwnd = 0;
4540 		}
4541 		/* stop any timers */
4542 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4543 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4544 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4545 			net->partial_bytes_acked = 0;
4546 			net->flight_size = 0;
4547 		}
4548 		asoc->total_flight = 0;
4549 		asoc->total_flight_count = 0;
4550 		return;
4551 	}
4552 	/*
4553 	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4554 	 * things. The total byte count acked is tracked in netAckSz AND
4555 	 * netAck2 is used to track the total bytes acked that are un-
4556 	 * ambiguous and were never retransmitted. We track these on a per
4557 	 * destination address basis.
4558 	 */
4559 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4560 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4561 			/* Drag along the window_tsn for cwr's */
4562 			net->cwr_window_tsn = cum_ack;
4563 		}
4564 		net->prev_cwnd = net->cwnd;
4565 		net->net_ack = 0;
4566 		net->net_ack2 = 0;
4567 
4568 		/*
4569 		 * CMT: Reset CUC and Fast recovery algo variables before
4570 		 * SACK processing
4571 		 */
4572 		net->new_pseudo_cumack = 0;
4573 		net->will_exit_fast_recovery = 0;
4574 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4575 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4576 		}
4577 
4578 		/*
4579 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4580 		 * to be greater than the cumack. Also reset saw_newack to 0
4581 		 * for all dests.
4582 		 */
4583 		net->saw_newack = 0;
4584 		net->this_sack_highest_newack = last_tsn;
4585 	}
4586 	/* process the new consecutive TSN first */
4587 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4588 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4589 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4590 				accum_moved = 1;
4591 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4592 					/*
4593 					 * If it is less than ACKED, it is
4594 					 * now no-longer in flight. Higher
4595 					 * values may occur during marking
4596 					 */
4597 					if ((tp1->whoTo->dest_state &
4598 					    SCTP_ADDR_UNCONFIRMED) &&
4599 					    (tp1->snd_count < 2)) {
4600 						/*
4601 						 * If there was no retran
4602 						 * and the address is
4603 						 * un-confirmed and we sent
4604 						 * there and are now
4605 						 * sacked.. its confirmed,
4606 						 * mark it so.
4607 						 */
4608 						tp1->whoTo->dest_state &=
4609 						    ~SCTP_ADDR_UNCONFIRMED;
4610 					}
4611 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4612 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4613 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4614 							    tp1->whoTo->flight_size,
4615 							    tp1->book_size,
4616 							    (uint32_t)(uintptr_t)tp1->whoTo,
4617 							    tp1->rec.data.tsn);
4618 						}
4619 						sctp_flight_size_decrease(tp1);
4620 						sctp_total_flight_decrease(stcb, tp1);
4621 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4622 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4623 							    tp1);
4624 						}
4625 					}
4626 					tp1->whoTo->net_ack += tp1->send_size;
4627 
4628 					/* CMT SFR and DAC algos */
4629 					this_sack_lowest_newack = tp1->rec.data.tsn;
4630 					tp1->whoTo->saw_newack = 1;
4631 
4632 					if (tp1->snd_count < 2) {
4633 						/*
4634 						 * True non-retransmitted
4635 						 * chunk
4636 						 */
4637 						tp1->whoTo->net_ack2 +=
4638 						    tp1->send_size;
4639 
4640 						/* update RTO too? */
4641 						if (tp1->do_rtt) {
4642 							if (rto_ok &&
4643 							    sctp_calculate_rto(stcb,
4644 							    &stcb->asoc,
4645 							    tp1->whoTo,
4646 							    &tp1->sent_rcv_time,
4647 							    SCTP_RTT_FROM_DATA)) {
4648 								rto_ok = 0;
4649 							}
4650 							if (tp1->whoTo->rto_needed == 0) {
4651 								tp1->whoTo->rto_needed = 1;
4652 							}
4653 							tp1->do_rtt = 0;
4654 						}
4655 					}
4656 					/*
4657 					 * CMT: CUCv2 algorithm. From the
4658 					 * cumack'd TSNs, for each TSN being
4659 					 * acked for the first time, set the
4660 					 * following variables for the
4661 					 * corresp destination.
4662 					 * new_pseudo_cumack will trigger a
4663 					 * cwnd update.
4664 					 * find_(rtx_)pseudo_cumack will
4665 					 * trigger search for the next
4666 					 * expected (rtx-)pseudo-cumack.
4667 					 */
4668 					tp1->whoTo->new_pseudo_cumack = 1;
4669 					tp1->whoTo->find_pseudo_cumack = 1;
4670 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4671 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4672 						sctp_log_sack(asoc->last_acked_seq,
4673 						    cum_ack,
4674 						    tp1->rec.data.tsn,
4675 						    0,
4676 						    0,
4677 						    SCTP_LOG_TSN_ACKED);
4678 					}
4679 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4680 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4681 					}
4682 				}
4683 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4684 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4685 #ifdef SCTP_AUDITING_ENABLED
4686 					sctp_audit_log(0xB3,
4687 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4688 #endif
4689 				}
4690 				if (tp1->rec.data.chunk_was_revoked) {
4691 					/* deflate the cwnd */
4692 					tp1->whoTo->cwnd -= tp1->book_size;
4693 					tp1->rec.data.chunk_was_revoked = 0;
4694 				}
4695 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4696 					tp1->sent = SCTP_DATAGRAM_ACKED;
4697 				}
4698 			}
4699 		} else {
4700 			break;
4701 		}
4702 	}
4703 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4704 	/* always set this up to cum-ack */
4705 	asoc->this_sack_highest_gap = last_tsn;
4706 
4707 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4708 		/*
4709 		 * thisSackHighestGap will increase while handling NEW
4710 		 * segments this_sack_highest_newack will increase while
4711 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4712 		 * used for CMT DAC algo. saw_newack will also change.
4713 		 */
4714 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4715 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4716 		    num_seg, num_nr_seg, &rto_ok)) {
4717 			wake_him++;
4718 		}
4719 		/*
4720 		 * validate the biggest_tsn_acked in the gap acks if strict
4721 		 * adherence is wanted.
4722 		 */
4723 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4724 			/*
4725 			 * peer is either confused or we are under attack.
4726 			 * We must abort.
4727 			 */
4728 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4729 			    biggest_tsn_acked, send_s);
4730 			goto hopeless_peer;
4731 		}
4732 	}
4733 	/*******************************************/
4734 	/* cancel ALL T3-send timer if accum moved */
4735 	/*******************************************/
4736 	if (asoc->sctp_cmt_on_off > 0) {
4737 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4738 			if (net->new_pseudo_cumack)
4739 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4740 				    stcb, net,
4741 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4742 		}
4743 	} else {
4744 		if (accum_moved) {
4745 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4746 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4747 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
4748 			}
4749 		}
4750 	}
4751 	/********************************************/
4752 	/* drop the acked chunks from the sentqueue */
4753 	/********************************************/
4754 	asoc->last_acked_seq = cum_ack;
4755 
4756 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4757 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4758 			break;
4759 		}
4760 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4761 			if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4762 				asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4763 #ifdef INVARIANTS
4764 			} else {
4765 				panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4766 #endif
4767 			}
4768 		}
4769 		if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4770 		    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4771 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4772 			asoc->trigger_reset = 1;
4773 		}
4774 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4775 		if (PR_SCTP_ENABLED(tp1->flags)) {
4776 			if (asoc->pr_sctp_cnt != 0)
4777 				asoc->pr_sctp_cnt--;
4778 		}
4779 		asoc->sent_queue_cnt--;
4780 		if (tp1->data) {
4781 			/* sa_ignore NO_NULL_CHK */
4782 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4783 			sctp_m_freem(tp1->data);
4784 			tp1->data = NULL;
4785 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4786 				asoc->sent_queue_cnt_removeable--;
4787 			}
4788 		}
4789 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4790 			sctp_log_sack(asoc->last_acked_seq,
4791 			    cum_ack,
4792 			    tp1->rec.data.tsn,
4793 			    0,
4794 			    0,
4795 			    SCTP_LOG_FREE_SENT);
4796 		}
4797 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4798 		wake_him++;
4799 	}
4800 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4801 #ifdef INVARIANTS
4802 		panic("Warning flight size is positive and should be 0");
4803 #else
4804 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4805 		    asoc->total_flight);
4806 #endif
4807 		asoc->total_flight = 0;
4808 	}
4809 
4810 	/* sa_ignore NO_NULL_CHK */
4811 	if ((wake_him) && (stcb->sctp_socket)) {
4812 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4813 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4814 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4815 		}
4816 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4817 	} else {
4818 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4819 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4820 		}
4821 	}
4822 
4823 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4824 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4825 			/* Setup so we will exit RFC2582 fast recovery */
4826 			will_exit_fast_recovery = 1;
4827 		}
4828 	}
4829 	/*
4830 	 * Check for revoked fragments:
4831 	 *
4832 	 * if Previous sack - Had no frags then we can't have any revoked if
4833 	 * Previous sack - Had frag's then - If we now have frags aka
4834 	 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4835 	 * some of them. else - The peer revoked all ACKED fragments, since
4836 	 * we had some before and now we have NONE.
4837 	 */
4838 
4839 	if (num_seg) {
4840 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4841 		asoc->saw_sack_with_frags = 1;
4842 	} else if (asoc->saw_sack_with_frags) {
4843 		int cnt_revoked = 0;
4844 
4845 		/* Peer revoked all dg's marked or acked */
4846 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4847 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4848 				tp1->sent = SCTP_DATAGRAM_SENT;
4849 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4850 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4851 					    tp1->whoTo->flight_size,
4852 					    tp1->book_size,
4853 					    (uint32_t)(uintptr_t)tp1->whoTo,
4854 					    tp1->rec.data.tsn);
4855 				}
4856 				sctp_flight_size_increase(tp1);
4857 				sctp_total_flight_increase(stcb, tp1);
4858 				tp1->rec.data.chunk_was_revoked = 1;
4859 				/*
4860 				 * To ensure that this increase in
4861 				 * flightsize, which is artificial, does not
4862 				 * throttle the sender, we also increase the
4863 				 * cwnd artificially.
4864 				 */
4865 				tp1->whoTo->cwnd += tp1->book_size;
4866 				cnt_revoked++;
4867 			}
4868 		}
4869 		if (cnt_revoked) {
4870 			reneged_all = 1;
4871 		}
4872 		asoc->saw_sack_with_frags = 0;
4873 	}
4874 	if (num_nr_seg > 0)
4875 		asoc->saw_sack_with_nr_frags = 1;
4876 	else
4877 		asoc->saw_sack_with_nr_frags = 0;
4878 
4879 	/* JRS - Use the congestion control given in the CC module */
4880 	if (ecne_seen == 0) {
4881 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4882 			if (net->net_ack2 > 0) {
4883 				/*
4884 				 * Karn's rule applies to clearing error
4885 				 * count, this is optional.
4886 				 */
4887 				net->error_count = 0;
4888 				if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) {
4889 					/* addr came good */
4890 					net->dest_state |= SCTP_ADDR_REACHABLE;
4891 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4892 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4893 				}
4894 
4895 				if (net == stcb->asoc.primary_destination) {
4896 					if (stcb->asoc.alternate) {
4897 						/*
4898 						 * release the alternate,
4899 						 * primary is good
4900 						 */
4901 						sctp_free_remote_addr(stcb->asoc.alternate);
4902 						stcb->asoc.alternate = NULL;
4903 					}
4904 				}
4905 
4906 				if (net->dest_state & SCTP_ADDR_PF) {
4907 					net->dest_state &= ~SCTP_ADDR_PF;
4908 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4909 					    stcb->sctp_ep, stcb, net,
4910 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
4911 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4912 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4913 					/* Done with this net */
4914 					net->net_ack = 0;
4915 				}
4916 				/* restore any doubled timers */
4917 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4918 				if (net->RTO < stcb->asoc.minrto) {
4919 					net->RTO = stcb->asoc.minrto;
4920 				}
4921 				if (net->RTO > stcb->asoc.maxrto) {
4922 					net->RTO = stcb->asoc.maxrto;
4923 				}
4924 			}
4925 		}
4926 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4927 	}
4928 
4929 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4930 		/* nothing left in-flight */
4931 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4932 			/* stop all timers */
4933 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4934 			    stcb, net,
4935 			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_34);
4936 			net->flight_size = 0;
4937 			net->partial_bytes_acked = 0;
4938 		}
4939 		asoc->total_flight = 0;
4940 		asoc->total_flight_count = 0;
4941 	}
4942 
4943 	/**********************************/
4944 	/* Now what about shutdown issues */
4945 	/**********************************/
4946 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4947 		/* nothing left on sendqueue.. consider done */
4948 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4949 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4950 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4951 		}
4952 		asoc->peers_rwnd = a_rwnd;
4953 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4954 			/* SWS sender side engages */
4955 			asoc->peers_rwnd = 0;
4956 		}
4957 		/* clean up */
4958 		if ((asoc->stream_queue_cnt == 1) &&
4959 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4960 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4961 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4962 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4963 		}
4964 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4965 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4966 		    (asoc->stream_queue_cnt == 1) &&
4967 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4968 			struct mbuf *op_err;
4969 
4970 			*abort_now = 1;
4971 			/* XXX */
4972 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4973 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35;
4974 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
4975 			return;
4976 		}
4977 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4978 		    (asoc->stream_queue_cnt == 0)) {
4979 			struct sctp_nets *netp;
4980 
4981 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4982 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4983 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4984 			}
4985 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4986 			sctp_stop_timers_for_shutdown(stcb);
4987 			if (asoc->alternate) {
4988 				netp = asoc->alternate;
4989 			} else {
4990 				netp = asoc->primary_destination;
4991 			}
4992 			sctp_send_shutdown(stcb, netp);
4993 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4994 			    stcb->sctp_ep, stcb, netp);
4995 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4996 			    stcb->sctp_ep, stcb, NULL);
4997 			return;
4998 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4999 		    (asoc->stream_queue_cnt == 0)) {
5000 			struct sctp_nets *netp;
5001 
5002 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5003 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5004 			sctp_stop_timers_for_shutdown(stcb);
5005 			if (asoc->alternate) {
5006 				netp = asoc->alternate;
5007 			} else {
5008 				netp = asoc->primary_destination;
5009 			}
5010 			sctp_send_shutdown_ack(stcb, netp);
5011 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5012 			    stcb->sctp_ep, stcb, netp);
5013 			return;
5014 		}
5015 	}
5016 	/*
5017 	 * Now here we are going to recycle net_ack for a different use...
5018 	 * HEADS UP.
5019 	 */
5020 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5021 		net->net_ack = 0;
5022 	}
5023 
5024 	/*
5025 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5026 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5027 	 * automatically ensure that.
5028 	 */
5029 	if ((asoc->sctp_cmt_on_off > 0) &&
5030 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5031 	    (cmt_dac_flag == 0)) {
5032 		this_sack_lowest_newack = cum_ack;
5033 	}
5034 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5035 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5036 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5037 	}
5038 	/* JRS - Use the congestion control given in the CC module */
5039 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5040 
5041 	/* Now are we exiting loss recovery ? */
5042 	if (will_exit_fast_recovery) {
5043 		/* Ok, we must exit fast recovery */
5044 		asoc->fast_retran_loss_recovery = 0;
5045 	}
5046 	if ((asoc->sat_t3_loss_recovery) &&
5047 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5048 		/* end satellite t3 loss recovery */
5049 		asoc->sat_t3_loss_recovery = 0;
5050 	}
5051 	/*
5052 	 * CMT Fast recovery
5053 	 */
5054 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5055 		if (net->will_exit_fast_recovery) {
5056 			/* Ok, we must exit fast recovery */
5057 			net->fast_retran_loss_recovery = 0;
5058 		}
5059 	}
5060 
5061 	/* Adjust and set the new rwnd value */
5062 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5063 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5064 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5065 	}
5066 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5067 	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5068 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5069 		/* SWS sender side engages */
5070 		asoc->peers_rwnd = 0;
5071 	}
5072 	if (asoc->peers_rwnd > old_rwnd) {
5073 		win_probe_recovery = 1;
5074 	}
5075 
5076 	/*
5077 	 * Now we must setup so we have a timer up for anyone with
5078 	 * outstanding data.
5079 	 */
5080 	done_once = 0;
5081 again:
5082 	j = 0;
5083 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5084 		if (win_probe_recovery && (net->window_probe)) {
5085 			win_probe_recovered = 1;
5086 			/*-
5087 			 * Find first chunk that was used with
5088 			 * window probe and clear the event. Put
5089 			 * it back into the send queue as if has
5090 			 * not been sent.
5091 			 */
5092 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5093 				if (tp1->window_probe) {
5094 					sctp_window_probe_recovery(stcb, asoc, tp1);
5095 					break;
5096 				}
5097 			}
5098 		}
5099 		if (net->flight_size) {
5100 			j++;
5101 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5102 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5103 				    stcb->sctp_ep, stcb, net);
5104 			}
5105 			if (net->window_probe) {
5106 				net->window_probe = 0;
5107 			}
5108 		} else {
5109 			if (net->window_probe) {
5110 				/*
5111 				 * In window probes we must assure a timer
5112 				 * is still running there
5113 				 */
5114 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5115 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5116 					    stcb->sctp_ep, stcb, net);
5117 				}
5118 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5119 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5120 				    stcb, net,
5121 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_36);
5122 			}
5123 		}
5124 	}
5125 	if ((j == 0) &&
5126 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5127 	    (asoc->sent_queue_retran_cnt == 0) &&
5128 	    (win_probe_recovered == 0) &&
5129 	    (done_once == 0)) {
5130 		/*
5131 		 * huh, this should not happen unless all packets are
5132 		 * PR-SCTP and marked to skip of course.
5133 		 */
5134 		if (sctp_fs_audit(asoc)) {
5135 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5136 				net->flight_size = 0;
5137 			}
5138 			asoc->total_flight = 0;
5139 			asoc->total_flight_count = 0;
5140 			asoc->sent_queue_retran_cnt = 0;
5141 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5142 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5143 					sctp_flight_size_increase(tp1);
5144 					sctp_total_flight_increase(stcb, tp1);
5145 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5146 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5147 				}
5148 			}
5149 		}
5150 		done_once = 1;
5151 		goto again;
5152 	}
5153 	/*********************************************/
5154 	/* Here we perform PR-SCTP procedures        */
5155 	/* (section 4.2)                             */
5156 	/*********************************************/
5157 	/* C1. update advancedPeerAckPoint */
5158 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5159 		asoc->advanced_peer_ack_point = cum_ack;
5160 	}
5161 	/* C2. try to further move advancedPeerAckPoint ahead */
5162 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5163 		struct sctp_tmit_chunk *lchk;
5164 		uint32_t old_adv_peer_ack_point;
5165 
5166 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5167 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5168 		/* C3. See if we need to send a Fwd-TSN */
5169 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5170 			/*
5171 			 * ISSUE with ECN, see FWD-TSN processing.
5172 			 */
5173 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5174 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5175 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
5176 				    old_adv_peer_ack_point);
5177 			}
5178 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5179 				send_forward_tsn(stcb, asoc);
5180 			} else if (lchk) {
5181 				/* try to FR fwd-tsn's that get lost too */
5182 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5183 					send_forward_tsn(stcb, asoc);
5184 				}
5185 			}
5186 		}
5187 		for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
5188 			if (lchk->whoTo != NULL) {
5189 				break;
5190 			}
5191 		}
5192 		if (lchk != NULL) {
5193 			/* Assure a timer is up */
5194 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5195 			    stcb->sctp_ep, stcb, lchk->whoTo);
5196 		}
5197 	}
5198 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5199 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5200 		    a_rwnd,
5201 		    stcb->asoc.peers_rwnd,
5202 		    stcb->asoc.total_flight,
5203 		    stcb->asoc.total_output_queue_size);
5204 	}
5205 }
5206 
5207 void
sctp_update_acked(struct sctp_tcb * stcb,struct sctp_shutdown_chunk * cp,int * abort_flag)5208 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5209 {
5210 	/* Copy cum-ack */
5211 	uint32_t cum_ack, a_rwnd;
5212 
5213 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5214 	/* Arrange so a_rwnd does NOT change */
5215 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5216 
5217 	/* Now call the express sack handling */
5218 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5219 }
5220 
5221 static void
sctp_kick_prsctp_reorder_queue(struct sctp_tcb * stcb,struct sctp_stream_in * strmin)5222 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5223     struct sctp_stream_in *strmin)
5224 {
5225 	struct sctp_queued_to_read *control, *ncontrol;
5226 	struct sctp_association *asoc;
5227 	uint32_t mid;
5228 	int need_reasm_check = 0;
5229 
5230 	KASSERT(stcb != NULL, ("stcb == NULL"));
5231 	SCTP_TCB_LOCK_ASSERT(stcb);
5232 	SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep);
5233 
5234 	asoc = &stcb->asoc;
5235 	mid = strmin->last_mid_delivered;
5236 	/*
5237 	 * First deliver anything prior to and including the stream no that
5238 	 * came in.
5239 	 */
5240 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5241 		if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5242 			/* this is deliverable now */
5243 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5244 				if (control->on_strm_q) {
5245 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5246 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5247 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5248 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5249 #ifdef INVARIANTS
5250 					} else {
5251 						panic("strmin: %p ctl: %p unknown %d",
5252 						    strmin, control, control->on_strm_q);
5253 #endif
5254 					}
5255 					control->on_strm_q = 0;
5256 				}
5257 				/* subtract pending on streams */
5258 				if (asoc->size_on_all_streams >= control->length) {
5259 					asoc->size_on_all_streams -= control->length;
5260 				} else {
5261 #ifdef INVARIANTS
5262 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5263 #else
5264 					asoc->size_on_all_streams = 0;
5265 #endif
5266 				}
5267 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5268 				/* deliver it to at least the delivery-q */
5269 				if (stcb->sctp_socket) {
5270 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5271 					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
5272 					    &stcb->sctp_socket->so_rcv, 1,
5273 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5274 				}
5275 			} else {
5276 				/* Its a fragmented message */
5277 				if (control->first_frag_seen) {
5278 					/*
5279 					 * Make it so this is next to
5280 					 * deliver, we restore later
5281 					 */
5282 					strmin->last_mid_delivered = control->mid - 1;
5283 					need_reasm_check = 1;
5284 					break;
5285 				}
5286 			}
5287 		} else {
5288 			/* no more delivery now. */
5289 			break;
5290 		}
5291 	}
5292 	if (need_reasm_check) {
5293 		int ret;
5294 
5295 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5296 		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5297 			/* Restore the next to deliver unless we are ahead */
5298 			strmin->last_mid_delivered = mid;
5299 		}
5300 		if (ret == 0) {
5301 			/* Left the front Partial one on */
5302 			return;
5303 		}
5304 		need_reasm_check = 0;
5305 	}
5306 	/*
5307 	 * now we must deliver things in queue the normal way  if any are
5308 	 * now ready.
5309 	 */
5310 	mid = strmin->last_mid_delivered + 1;
5311 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5312 		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5313 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5314 				/* this is deliverable now */
5315 				if (control->on_strm_q) {
5316 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5317 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5318 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5319 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5320 #ifdef INVARIANTS
5321 					} else {
5322 						panic("strmin: %p ctl: %p unknown %d",
5323 						    strmin, control, control->on_strm_q);
5324 #endif
5325 					}
5326 					control->on_strm_q = 0;
5327 				}
5328 				/* subtract pending on streams */
5329 				if (asoc->size_on_all_streams >= control->length) {
5330 					asoc->size_on_all_streams -= control->length;
5331 				} else {
5332 #ifdef INVARIANTS
5333 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5334 #else
5335 					asoc->size_on_all_streams = 0;
5336 #endif
5337 				}
5338 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5339 				/* deliver it to at least the delivery-q */
5340 				strmin->last_mid_delivered = control->mid;
5341 				if (stcb->sctp_socket) {
5342 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5343 					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
5344 					    &stcb->sctp_socket->so_rcv, 1,
5345 					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5346 				}
5347 				mid = strmin->last_mid_delivered + 1;
5348 			} else {
5349 				/* Its a fragmented message */
5350 				if (control->first_frag_seen) {
5351 					/*
5352 					 * Make it so this is next to
5353 					 * deliver
5354 					 */
5355 					strmin->last_mid_delivered = control->mid - 1;
5356 					need_reasm_check = 1;
5357 					break;
5358 				}
5359 			}
5360 		} else {
5361 			break;
5362 		}
5363 	}
5364 	if (need_reasm_check) {
5365 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5366 	}
5367 }
5368 
5369 static void
sctp_flush_reassm_for_str_seq(struct sctp_tcb * stcb,struct sctp_association * asoc,struct sctp_stream_in * strm,struct sctp_queued_to_read * control,int ordered,uint32_t cumtsn)5370 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5371     struct sctp_association *asoc, struct sctp_stream_in *strm,
5372     struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn)
5373 {
5374 	struct sctp_tmit_chunk *chk, *nchk;
5375 
5376 	/*
5377 	 * For now large messages held on the stream reasm that are complete
5378 	 * will be tossed too. We could in theory do more work to spin
5379 	 * through and stop after dumping one msg aka seeing the start of a
5380 	 * new msg at the head, and call the delivery function... to see if
5381 	 * it can be delivered... But for now we just dump everything on the
5382 	 * queue.
5383 	 */
5384 
5385 	KASSERT(stcb != NULL, ("stcb == NULL"));
5386 	SCTP_TCB_LOCK_ASSERT(stcb);
5387 	SCTP_INP_READ_LOCK_ASSERT(stcb->sctp_ep);
5388 
5389 	if (!asoc->idata_supported && !ordered &&
5390 	    control->first_frag_seen &&
5391 	    SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5392 		return;
5393 	}
5394 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5395 		/* Purge hanging chunks */
5396 		if (!asoc->idata_supported && !ordered) {
5397 			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5398 				break;
5399 			}
5400 		}
5401 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5402 		if (asoc->size_on_reasm_queue >= chk->send_size) {
5403 			asoc->size_on_reasm_queue -= chk->send_size;
5404 		} else {
5405 #ifdef INVARIANTS
5406 			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5407 #else
5408 			asoc->size_on_reasm_queue = 0;
5409 #endif
5410 		}
5411 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5412 		if (chk->data) {
5413 			sctp_m_freem(chk->data);
5414 			chk->data = NULL;
5415 		}
5416 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5417 	}
5418 	if (!TAILQ_EMPTY(&control->reasm)) {
5419 		KASSERT(!asoc->idata_supported,
5420 		    ("Reassembly queue not empty for I-DATA"));
5421 		KASSERT(!ordered,
5422 		    ("Reassembly queue not empty for ordered data"));
5423 		if (control->data) {
5424 			sctp_m_freem(control->data);
5425 			control->data = NULL;
5426 		}
5427 		control->fsn_included = 0xffffffff;
5428 		control->first_frag_seen = 0;
5429 		control->last_frag_seen = 0;
5430 		if (control->on_read_q) {
5431 			/*
5432 			 * We have to purge it from there, hopefully this
5433 			 * will work :-)
5434 			 */
5435 			TAILQ_REMOVE(&stcb->sctp_ep->read_queue, control, next);
5436 			control->on_read_q = 0;
5437 		}
5438 		chk = TAILQ_FIRST(&control->reasm);
5439 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5440 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5441 			sctp_add_chk_to_control(control, strm, stcb, asoc,
5442 			    chk, SCTP_READ_LOCK_HELD);
5443 		}
5444 		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5445 		return;
5446 	}
5447 	if (control->on_strm_q == SCTP_ON_ORDERED) {
5448 		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5449 		if (asoc->size_on_all_streams >= control->length) {
5450 			asoc->size_on_all_streams -= control->length;
5451 		} else {
5452 #ifdef INVARIANTS
5453 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5454 #else
5455 			asoc->size_on_all_streams = 0;
5456 #endif
5457 		}
5458 		sctp_ucount_decr(asoc->cnt_on_all_streams);
5459 		control->on_strm_q = 0;
5460 	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5461 		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5462 		control->on_strm_q = 0;
5463 #ifdef INVARIANTS
5464 	} else if (control->on_strm_q) {
5465 		panic("strm: %p ctl: %p unknown %d",
5466 		    strm, control, control->on_strm_q);
5467 #endif
5468 	}
5469 	control->on_strm_q = 0;
5470 	if (control->on_read_q == 0) {
5471 		sctp_free_remote_addr(control->whoFrom);
5472 		if (control->data) {
5473 			sctp_m_freem(control->data);
5474 			control->data = NULL;
5475 		}
5476 		sctp_free_a_readq(stcb, control);
5477 	}
5478 }
5479 
5480 void
sctp_handle_forward_tsn(struct sctp_tcb * stcb,struct sctp_forward_tsn_chunk * fwd,int * abort_flag,struct mbuf * m,int offset)5481 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5482     struct sctp_forward_tsn_chunk *fwd,
5483     int *abort_flag, struct mbuf *m, int offset)
5484 {
5485 	/* The pr-sctp fwd tsn */
5486 	/*
5487 	 * here we will perform all the data receiver side steps for
5488 	 * processing FwdTSN, as required in by pr-sctp draft:
5489 	 *
5490 	 * Assume we get FwdTSN(x):
5491 	 *
5492 	 * 1) update local cumTSN to x 2) try to further advance cumTSN to x
5493 	 * + others we have 3) examine and update re-ordering queue on
5494 	 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5495 	 * report where we are.
5496 	 */
5497 	struct sctp_association *asoc;
5498 	uint32_t new_cum_tsn, gap;
5499 	unsigned int i, fwd_sz, m_size;
5500 	struct sctp_stream_in *strm;
5501 	struct sctp_queued_to_read *control, *ncontrol;
5502 
5503 	asoc = &stcb->asoc;
5504 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5505 		SCTPDBG(SCTP_DEBUG_INDATA1,
5506 		    "Bad size too small/big fwd-tsn\n");
5507 		return;
5508 	}
5509 	m_size = (stcb->asoc.mapping_array_size << 3);
5510 	/*************************************************************/
5511 	/* 1. Here we update local cumTSN and shift the bitmap array */
5512 	/*************************************************************/
5513 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5514 
5515 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5516 		/* Already got there ... */
5517 		return;
5518 	}
5519 	/*
5520 	 * now we know the new TSN is more advanced, let's find the actual
5521 	 * gap
5522 	 */
5523 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5524 	asoc->cumulative_tsn = new_cum_tsn;
5525 	if (gap >= m_size) {
5526 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5527 			struct mbuf *op_err;
5528 			char msg[SCTP_DIAG_INFO_LEN];
5529 
5530 			/*
5531 			 * out of range (of single byte chunks in the rwnd I
5532 			 * give out). This must be an attacker.
5533 			 */
5534 			*abort_flag = 1;
5535 			SCTP_SNPRINTF(msg, sizeof(msg),
5536 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5537 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5538 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5539 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_37;
5540 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
5541 			return;
5542 		}
5543 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5544 
5545 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5546 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5547 		asoc->highest_tsn_inside_map = new_cum_tsn;
5548 
5549 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5550 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5551 
5552 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5553 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5554 		}
5555 	} else {
5556 		SCTP_TCB_LOCK_ASSERT(stcb);
5557 		for (i = 0; i <= gap; i++) {
5558 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5559 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5560 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5561 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5562 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5563 				}
5564 			}
5565 		}
5566 	}
5567 	/*************************************************************/
5568 	/* 2. Clear up re-assembly queue                             */
5569 	/*************************************************************/
5570 
5571 	/* This is now done as part of clearing up the stream/seq */
5572 	if (asoc->idata_supported == 0) {
5573 		uint16_t sid;
5574 
5575 		/* Flush all the un-ordered data based on cum-tsn */
5576 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5577 		for (sid = 0; sid < asoc->streamincnt; sid++) {
5578 			strm = &asoc->strmin[sid];
5579 			if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5580 				sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn);
5581 			}
5582 		}
5583 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5584 	}
5585 	/*******************************************************/
5586 	/* 3. Update the PR-stream re-ordering queues and fix  */
5587 	/* delivery issues as needed.                       */
5588 	/*******************************************************/
5589 	fwd_sz -= sizeof(*fwd);
5590 	if (m && fwd_sz) {
5591 		/* New method. */
5592 		unsigned int num_str;
5593 		uint32_t mid;
5594 		uint16_t sid;
5595 		uint16_t ordered, flags;
5596 		struct sctp_strseq *stseq, strseqbuf;
5597 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5598 
5599 		offset += sizeof(*fwd);
5600 
5601 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5602 		if (asoc->idata_supported) {
5603 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5604 		} else {
5605 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5606 		}
5607 		for (i = 0; i < num_str; i++) {
5608 			if (asoc->idata_supported) {
5609 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5610 				    sizeof(struct sctp_strseq_mid),
5611 				    (uint8_t *)&strseqbuf_m);
5612 				offset += sizeof(struct sctp_strseq_mid);
5613 				if (stseq_m == NULL) {
5614 					break;
5615 				}
5616 				sid = ntohs(stseq_m->sid);
5617 				mid = ntohl(stseq_m->mid);
5618 				flags = ntohs(stseq_m->flags);
5619 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5620 					ordered = 0;
5621 				} else {
5622 					ordered = 1;
5623 				}
5624 			} else {
5625 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5626 				    sizeof(struct sctp_strseq),
5627 				    (uint8_t *)&strseqbuf);
5628 				offset += sizeof(struct sctp_strseq);
5629 				if (stseq == NULL) {
5630 					break;
5631 				}
5632 				sid = ntohs(stseq->sid);
5633 				mid = (uint32_t)ntohs(stseq->ssn);
5634 				ordered = 1;
5635 			}
5636 			/* Convert */
5637 
5638 			/* now process */
5639 
5640 			/*
5641 			 * Ok we now look for the stream/seq on the read
5642 			 * queue where its not all delivered. If we find it
5643 			 * we transmute the read entry into a PDI_ABORTED.
5644 			 */
5645 			if (sid >= asoc->streamincnt) {
5646 				/* screwed up streams, stop!  */
5647 				break;
5648 			}
5649 			if ((asoc->str_of_pdapi == sid) &&
5650 			    (asoc->ssn_of_pdapi == mid)) {
5651 				/*
5652 				 * If this is the one we were partially
5653 				 * delivering now then we no longer are.
5654 				 * Note this will change with the reassembly
5655 				 * re-write.
5656 				 */
5657 				asoc->fragmented_delivery_inprogress = 0;
5658 			}
5659 			strm = &asoc->strmin[sid];
5660 			if (ordered) {
5661 				TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, ncontrol) {
5662 					if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5663 						sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5664 					}
5665 				}
5666 			} else {
5667 				if (asoc->idata_supported) {
5668 					TAILQ_FOREACH_SAFE(control, &strm->uno_inqueue, next_instrm, ncontrol) {
5669 						if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5670 							sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5671 						}
5672 					}
5673 				} else {
5674 					if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5675 						sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn);
5676 					}
5677 				}
5678 			}
5679 			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5680 				if ((control->sinfo_stream == sid) &&
5681 				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5682 					control->pdapi_aborted = 1;
5683 					control->end_added = 1;
5684 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5685 						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5686 						if (asoc->size_on_all_streams >= control->length) {
5687 							asoc->size_on_all_streams -= control->length;
5688 						} else {
5689 #ifdef INVARIANTS
5690 							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5691 #else
5692 							asoc->size_on_all_streams = 0;
5693 #endif
5694 						}
5695 						sctp_ucount_decr(asoc->cnt_on_all_streams);
5696 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5697 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5698 #ifdef INVARIANTS
5699 					} else if (control->on_strm_q) {
5700 						panic("strm: %p ctl: %p unknown %d",
5701 						    strm, control, control->on_strm_q);
5702 #endif
5703 					}
5704 					control->on_strm_q = 0;
5705 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5706 					    stcb,
5707 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5708 					    (void *)control,
5709 					    SCTP_SO_NOT_LOCKED);
5710 					break;
5711 				} else if ((control->sinfo_stream == sid) &&
5712 				    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5713 					/* We are past our victim SSN */
5714 					break;
5715 				}
5716 			}
5717 			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5718 				/* Update the sequence number */
5719 				strm->last_mid_delivered = mid;
5720 			}
5721 			/* now kick the stream the new way */
5722 			/* sa_ignore NO_NULL_CHK */
5723 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5724 		}
5725 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5726 	}
5727 	/*
5728 	 * Now slide thing forward.
5729 	 */
5730 	sctp_slide_mapping_arrays(stcb);
5731 }
5732