xref: /freebsd/sys/netinet/sctputil.c (revision 33af26323013eeffb4b7b1cb78ce5258486f9c57)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #include <netinet6/sctp6_var.h>
45 #endif
46 #include <netinet/sctp_header.h>
47 #include <netinet/sctp_output.h>
48 #include <netinet/sctp_uio.h>
49 #include <netinet/sctp_timer.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_auth.h>
52 #include <netinet/sctp_asconf.h>
53 #include <netinet/sctp_bsd_addr.h>
54 #include <netinet/sctp_kdtrace.h>
55 #if defined(INET6) || defined(INET)
56 #include <netinet/tcp_var.h>
57 #endif
58 #include <netinet/udp.h>
59 #include <netinet/udp_var.h>
60 #include <sys/proc.h>
61 #ifdef INET6
62 #include <netinet/icmp6.h>
63 #endif
64 
65 
66 #ifndef KTR_SCTP
67 #define KTR_SCTP KTR_SUBSYS
68 #endif
69 
70 extern const struct sctp_cc_functions sctp_cc_functions[];
71 extern const struct sctp_ss_functions sctp_ss_functions[];
72 
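/*
 * Trace logging helpers: when SCTP_LOCAL_TRACE_BUF is defined, each of the
 * logging functions below fills a struct sctp_cwnd_log record and hands its
 * four 32-bit words to SCTP_CTR6(), tagging the KTR entry with the event
 * type and the caller-supplied "from" location code.  Without
 * SCTP_LOCAL_TRACE_BUF they compile down to empty functions.
 */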
73 void
74 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
75 {
76 #if defined(SCTP_LOCAL_TRACE_BUF)
77 	struct sctp_cwnd_log sctp_clog;
78 
79 	sctp_clog.x.sb.stcb = stcb;
80 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
81 	if (stcb)
82 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
83 	else
84 		sctp_clog.x.sb.stcb_sbcc = 0;
85 	sctp_clog.x.sb.incr = incr;
86 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
87 	    SCTP_LOG_EVENT_SB,
88 	    from,
89 	    sctp_clog.x.misc.log1,
90 	    sctp_clog.x.misc.log2,
91 	    sctp_clog.x.misc.log3,
92 	    sctp_clog.x.misc.log4);
93 #endif
94 }
95 
96 void
97 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
98 {
99 #if defined(SCTP_LOCAL_TRACE_BUF)
100 	struct sctp_cwnd_log sctp_clog;
101 
102 	sctp_clog.x.close.inp = (void *)inp;
103 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
104 	if (stcb) {
105 		sctp_clog.x.close.stcb = (void *)stcb;
106 		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
107 	} else {
108 		sctp_clog.x.close.stcb = 0;
109 		sctp_clog.x.close.state = 0;
110 	}
111 	sctp_clog.x.close.loc = loc;
112 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
113 	    SCTP_LOG_EVENT_CLOSE,
114 	    0,
115 	    sctp_clog.x.misc.log1,
116 	    sctp_clog.x.misc.log2,
117 	    sctp_clog.x.misc.log3,
118 	    sctp_clog.x.misc.log4);
119 #endif
120 }
121 
122 void
123 rto_logging(struct sctp_nets *net, int from)
124 {
125 #if defined(SCTP_LOCAL_TRACE_BUF)
126 	struct sctp_cwnd_log sctp_clog;
127 
128 	memset(&sctp_clog, 0, sizeof(sctp_clog));
129 	sctp_clog.x.rto.net = (void *)net;
130 	sctp_clog.x.rto.rtt = net->rtt / 1000;
131 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
132 	    SCTP_LOG_EVENT_RTT,
133 	    from,
134 	    sctp_clog.x.misc.log1,
135 	    sctp_clog.x.misc.log2,
136 	    sctp_clog.x.misc.log3,
137 	    sctp_clog.x.misc.log4);
138 #endif
139 }
140 
141 void
142 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
143 {
144 #if defined(SCTP_LOCAL_TRACE_BUF)
145 	struct sctp_cwnd_log sctp_clog;
146 
147 	sctp_clog.x.strlog.stcb = stcb;
148 	sctp_clog.x.strlog.n_tsn = tsn;
149 	sctp_clog.x.strlog.n_sseq = sseq;
150 	sctp_clog.x.strlog.e_tsn = 0;
151 	sctp_clog.x.strlog.e_sseq = 0;
152 	sctp_clog.x.strlog.strm = stream;
153 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
154 	    SCTP_LOG_EVENT_STRM,
155 	    from,
156 	    sctp_clog.x.misc.log1,
157 	    sctp_clog.x.misc.log2,
158 	    sctp_clog.x.misc.log3,
159 	    sctp_clog.x.misc.log4);
160 #endif
161 }
162 
163 void
164 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
165 {
166 #if defined(SCTP_LOCAL_TRACE_BUF)
167 	struct sctp_cwnd_log sctp_clog;
168 
169 	sctp_clog.x.nagle.stcb = (void *)stcb;
170 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
171 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
172 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
173 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
174 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
175 	    SCTP_LOG_EVENT_NAGLE,
176 	    action,
177 	    sctp_clog.x.misc.log1,
178 	    sctp_clog.x.misc.log2,
179 	    sctp_clog.x.misc.log3,
180 	    sctp_clog.x.misc.log4);
181 #endif
182 }
183 
184 void
185 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
186 {
187 #if defined(SCTP_LOCAL_TRACE_BUF)
188 	struct sctp_cwnd_log sctp_clog;
189 
190 	sctp_clog.x.sack.cumack = cumack;
191 	sctp_clog.x.sack.oldcumack = old_cumack;
192 	sctp_clog.x.sack.tsn = tsn;
193 	sctp_clog.x.sack.numGaps = gaps;
194 	sctp_clog.x.sack.numDups = dups;
195 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
196 	    SCTP_LOG_EVENT_SACK,
197 	    from,
198 	    sctp_clog.x.misc.log1,
199 	    sctp_clog.x.misc.log2,
200 	    sctp_clog.x.misc.log3,
201 	    sctp_clog.x.misc.log4);
202 #endif
203 }
204 
205 void
206 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
207 {
208 #if defined(SCTP_LOCAL_TRACE_BUF)
209 	struct sctp_cwnd_log sctp_clog;
210 
211 	memset(&sctp_clog, 0, sizeof(sctp_clog));
212 	sctp_clog.x.map.base = map;
213 	sctp_clog.x.map.cum = cum;
214 	sctp_clog.x.map.high = high;
215 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
216 	    SCTP_LOG_EVENT_MAP,
217 	    from,
218 	    sctp_clog.x.misc.log1,
219 	    sctp_clog.x.misc.log2,
220 	    sctp_clog.x.misc.log3,
221 	    sctp_clog.x.misc.log4);
222 #endif
223 }
224 
225 void
226 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
227 {
228 #if defined(SCTP_LOCAL_TRACE_BUF)
229 	struct sctp_cwnd_log sctp_clog;
230 
231 	memset(&sctp_clog, 0, sizeof(sctp_clog));
232 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
233 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
234 	sctp_clog.x.fr.tsn = tsn;
235 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
236 	    SCTP_LOG_EVENT_FR,
237 	    from,
238 	    sctp_clog.x.misc.log1,
239 	    sctp_clog.x.misc.log2,
240 	    sctp_clog.x.misc.log3,
241 	    sctp_clog.x.misc.log4);
242 #endif
243 }
244 
245 #ifdef SCTP_MBUF_LOGGING
246 void
247 sctp_log_mb(struct mbuf *m, int from)
248 {
249 #if defined(SCTP_LOCAL_TRACE_BUF)
250 	struct sctp_cwnd_log sctp_clog;
251 
252 	sctp_clog.x.mb.mp = m;
253 	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
254 	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
255 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
256 	if (SCTP_BUF_IS_EXTENDED(m)) {
257 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
258 		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
259 	} else {
260 		sctp_clog.x.mb.ext = 0;
261 		sctp_clog.x.mb.refcnt = 0;
262 	}
263 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
264 	    SCTP_LOG_EVENT_MBUF,
265 	    from,
266 	    sctp_clog.x.misc.log1,
267 	    sctp_clog.x.misc.log2,
268 	    sctp_clog.x.misc.log3,
269 	    sctp_clog.x.misc.log4);
270 #endif
271 }
272 
273 void
274 sctp_log_mbc(struct mbuf *m, int from)
275 {
276 	struct mbuf *mat;
277 
278 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
279 		sctp_log_mb(mat, from);
280 	}
281 }
282 #endif
283 
284 void
285 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
286 {
287 #if defined(SCTP_LOCAL_TRACE_BUF)
288 	struct sctp_cwnd_log sctp_clog;
289 
290 	if (control == NULL) {
291 		SCTP_PRINTF("Gak log of NULL?\n");
292 		return;
293 	}
294 	sctp_clog.x.strlog.stcb = control->stcb;
295 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
296 	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
297 	sctp_clog.x.strlog.strm = control->sinfo_stream;
298 	if (poschk != NULL) {
299 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
300 		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
301 	} else {
302 		sctp_clog.x.strlog.e_tsn = 0;
303 		sctp_clog.x.strlog.e_sseq = 0;
304 	}
305 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
306 	    SCTP_LOG_EVENT_STRM,
307 	    from,
308 	    sctp_clog.x.misc.log1,
309 	    sctp_clog.x.misc.log2,
310 	    sctp_clog.x.misc.log3,
311 	    sctp_clog.x.misc.log4);
312 #endif
313 }
314 
315 void
316 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
317 {
318 #if defined(SCTP_LOCAL_TRACE_BUF)
319 	struct sctp_cwnd_log sctp_clog;
320 
321 	sctp_clog.x.cwnd.net = net;
322 	if (stcb->asoc.send_queue_cnt > 255)
323 		sctp_clog.x.cwnd.cnt_in_send = 255;
324 	else
325 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
326 	if (stcb->asoc.stream_queue_cnt > 255)
327 		sctp_clog.x.cwnd.cnt_in_str = 255;
328 	else
329 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
330 
331 	if (net) {
332 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
333 		sctp_clog.x.cwnd.inflight = net->flight_size;
334 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
335 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
336 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
337 	}
338 	if (SCTP_CWNDLOG_PRESEND == from) {
339 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
340 	}
341 	sctp_clog.x.cwnd.cwnd_augment = augment;
342 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
343 	    SCTP_LOG_EVENT_CWND,
344 	    from,
345 	    sctp_clog.x.misc.log1,
346 	    sctp_clog.x.misc.log2,
347 	    sctp_clog.x.misc.log3,
348 	    sctp_clog.x.misc.log4);
349 #endif
350 }
351 
352 void
353 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
354 {
355 #if defined(SCTP_LOCAL_TRACE_BUF)
356 	struct sctp_cwnd_log sctp_clog;
357 
358 	memset(&sctp_clog, 0, sizeof(sctp_clog));
359 	if (inp) {
360 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
361 
362 	} else {
363 		sctp_clog.x.lock.sock = (void *)NULL;
364 	}
365 	sctp_clog.x.lock.inp = (void *)inp;
366 	if (stcb) {
367 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
368 	} else {
369 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
370 	}
371 	if (inp) {
372 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
373 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
374 	} else {
375 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
376 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
377 	}
378 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
379 	if (inp && (inp->sctp_socket)) {
380 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
381 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
382 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
383 	} else {
384 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
385 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
386 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
387 	}
388 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
389 	    SCTP_LOG_LOCK_EVENT,
390 	    from,
391 	    sctp_clog.x.misc.log1,
392 	    sctp_clog.x.misc.log2,
393 	    sctp_clog.x.misc.log3,
394 	    sctp_clog.x.misc.log4);
395 #endif
396 }
397 
398 void
399 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
400 {
401 #if defined(SCTP_LOCAL_TRACE_BUF)
402 	struct sctp_cwnd_log sctp_clog;
403 
404 	memset(&sctp_clog, 0, sizeof(sctp_clog));
405 	sctp_clog.x.cwnd.net = net;
406 	sctp_clog.x.cwnd.cwnd_new_value = error;
407 	sctp_clog.x.cwnd.inflight = net->flight_size;
408 	sctp_clog.x.cwnd.cwnd_augment = burst;
409 	if (stcb->asoc.send_queue_cnt > 255)
410 		sctp_clog.x.cwnd.cnt_in_send = 255;
411 	else
412 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
413 	if (stcb->asoc.stream_queue_cnt > 255)
414 		sctp_clog.x.cwnd.cnt_in_str = 255;
415 	else
416 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
417 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
418 	    SCTP_LOG_EVENT_MAXBURST,
419 	    from,
420 	    sctp_clog.x.misc.log1,
421 	    sctp_clog.x.misc.log2,
422 	    sctp_clog.x.misc.log3,
423 	    sctp_clog.x.misc.log4);
424 #endif
425 }
426 
427 void
428 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
429 {
430 #if defined(SCTP_LOCAL_TRACE_BUF)
431 	struct sctp_cwnd_log sctp_clog;
432 
433 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
434 	sctp_clog.x.rwnd.send_size = snd_size;
435 	sctp_clog.x.rwnd.overhead = overhead;
436 	sctp_clog.x.rwnd.new_rwnd = 0;
437 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
438 	    SCTP_LOG_EVENT_RWND,
439 	    from,
440 	    sctp_clog.x.misc.log1,
441 	    sctp_clog.x.misc.log2,
442 	    sctp_clog.x.misc.log3,
443 	    sctp_clog.x.misc.log4);
444 #endif
445 }
446 
447 void
448 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
449 {
450 #if defined(SCTP_LOCAL_TRACE_BUF)
451 	struct sctp_cwnd_log sctp_clog;
452 
453 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
454 	sctp_clog.x.rwnd.send_size = flight_size;
455 	sctp_clog.x.rwnd.overhead = overhead;
456 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
457 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
458 	    SCTP_LOG_EVENT_RWND,
459 	    from,
460 	    sctp_clog.x.misc.log1,
461 	    sctp_clog.x.misc.log2,
462 	    sctp_clog.x.misc.log3,
463 	    sctp_clog.x.misc.log4);
464 #endif
465 }
466 
467 #ifdef SCTP_MBCNT_LOGGING
468 static void
469 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
470 {
471 #if defined(SCTP_LOCAL_TRACE_BUF)
472 	struct sctp_cwnd_log sctp_clog;
473 
474 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
475 	sctp_clog.x.mbcnt.size_change = book;
476 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
477 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
478 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
479 	    SCTP_LOG_EVENT_MBCNT,
480 	    from,
481 	    sctp_clog.x.misc.log1,
482 	    sctp_clog.x.misc.log2,
483 	    sctp_clog.x.misc.log3,
484 	    sctp_clog.x.misc.log4);
485 #endif
486 }
487 #endif
488 
489 void
490 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
491 {
492 #if defined(SCTP_LOCAL_TRACE_BUF)
493 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
494 	    SCTP_LOG_MISC_EVENT,
495 	    from,
496 	    a, b, c, d);
497 #endif
498 }
499 
500 void
501 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
502 {
503 #if defined(SCTP_LOCAL_TRACE_BUF)
504 	struct sctp_cwnd_log sctp_clog;
505 
506 	sctp_clog.x.wake.stcb = (void *)stcb;
507 	sctp_clog.x.wake.wake_cnt = wake_cnt;
508 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
509 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
510 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
511 
512 	if (stcb->asoc.stream_queue_cnt < 0xff)
513 		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
514 	else
515 		sctp_clog.x.wake.stream_qcnt = 0xff;
516 
517 	if (stcb->asoc.chunks_on_out_queue < 0xff)
518 		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
519 	else
520 		sctp_clog.x.wake.chunks_on_oque = 0xff;
521 
522 	sctp_clog.x.wake.sctpflags = 0;
523 	/* set in the deferred mode stuff */
524 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
525 		sctp_clog.x.wake.sctpflags |= 1;
526 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
527 		sctp_clog.x.wake.sctpflags |= 2;
528 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
529 		sctp_clog.x.wake.sctpflags |= 4;
530 	/* what about the sb */
531 	if (stcb->sctp_socket) {
532 		struct socket *so = stcb->sctp_socket;
533 
534 		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
535 	} else {
536 		sctp_clog.x.wake.sbflags = 0xff;
537 	}
538 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
539 	    SCTP_LOG_EVENT_WAKE,
540 	    from,
541 	    sctp_clog.x.misc.log1,
542 	    sctp_clog.x.misc.log2,
543 	    sctp_clog.x.misc.log3,
544 	    sctp_clog.x.misc.log4);
545 #endif
546 }
547 
548 void
549 sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
550 {
551 #if defined(SCTP_LOCAL_TRACE_BUF)
552 	struct sctp_cwnd_log sctp_clog;
553 
554 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
555 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
556 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
557 	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
558 	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
559 	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
560 	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
561 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
562 	    SCTP_LOG_EVENT_BLOCK,
563 	    from,
564 	    sctp_clog.x.misc.log1,
565 	    sctp_clog.x.misc.log2,
566 	    sctp_clog.x.misc.log3,
567 	    sctp_clog.x.misc.log4);
568 #endif
569 }
570 
571 int
572 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
573 {
574 	/* May need to fix this if ktrdump does not work */
575 	return (0);
576 }
577 
578 #ifdef SCTP_AUDITING_ENABLED
579 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
580 static int sctp_audit_indx = 0;
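/*
 * The audit trail is a fixed-size ring of two-byte records: byte 0 holds an
 * event code and byte 1 a detail value.  sctp_audit_indx marks the next slot
 * to write and wraps at SCTP_AUDIT_SIZE, so the report below prints the
 * entries in chronological order starting at the current index.
 */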
581 
582 static
583 void
584 sctp_print_audit_report(void)
585 {
586 	int i;
587 	int cnt;
588 
589 	cnt = 0;
590 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
591 		if ((sctp_audit_data[i][0] == 0xe0) &&
592 		    (sctp_audit_data[i][1] == 0x01)) {
593 			cnt = 0;
594 			SCTP_PRINTF("\n");
595 		} else if (sctp_audit_data[i][0] == 0xf0) {
596 			cnt = 0;
597 			SCTP_PRINTF("\n");
598 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
599 		    (sctp_audit_data[i][1] == 0x01)) {
600 			SCTP_PRINTF("\n");
601 			cnt = 0;
602 		}
603 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
604 		    (uint32_t)sctp_audit_data[i][1]);
605 		cnt++;
606 		if ((cnt % 14) == 0)
607 			SCTP_PRINTF("\n");
608 	}
609 	for (i = 0; i < sctp_audit_indx; i++) {
610 		if ((sctp_audit_data[i][0] == 0xe0) &&
611 		    (sctp_audit_data[i][1] == 0x01)) {
612 			cnt = 0;
613 			SCTP_PRINTF("\n");
614 		} else if (sctp_audit_data[i][0] == 0xf0) {
615 			cnt = 0;
616 			SCTP_PRINTF("\n");
617 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
618 		    (sctp_audit_data[i][1] == 0x01)) {
619 			SCTP_PRINTF("\n");
620 			cnt = 0;
621 		}
622 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
623 		    (uint32_t)sctp_audit_data[i][1]);
624 		cnt++;
625 		if ((cnt % 14) == 0)
626 			SCTP_PRINTF("\n");
627 	}
628 	SCTP_PRINTF("\n");
629 }
630 
631 void
632 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
633     struct sctp_nets *net)
634 {
635 	int resend_cnt, tot_out, rep, tot_book_cnt;
636 	struct sctp_nets *lnet;
637 	struct sctp_tmit_chunk *chk;
638 
639 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
640 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
641 	sctp_audit_indx++;
642 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
643 		sctp_audit_indx = 0;
644 	}
645 	if (inp == NULL) {
646 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
647 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
648 		sctp_audit_indx++;
649 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
650 			sctp_audit_indx = 0;
651 		}
652 		return;
653 	}
654 	if (stcb == NULL) {
655 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
656 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
657 		sctp_audit_indx++;
658 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
659 			sctp_audit_indx = 0;
660 		}
661 		return;
662 	}
663 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
664 	sctp_audit_data[sctp_audit_indx][1] =
665 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
666 	sctp_audit_indx++;
667 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
668 		sctp_audit_indx = 0;
669 	}
670 	rep = 0;
671 	tot_book_cnt = 0;
672 	resend_cnt = tot_out = 0;
673 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
674 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
675 			resend_cnt++;
676 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
677 			tot_out += chk->book_size;
678 			tot_book_cnt++;
679 		}
680 	}
681 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
682 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
683 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
684 		sctp_audit_indx++;
685 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
686 			sctp_audit_indx = 0;
687 		}
688 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
689 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
690 		rep = 1;
691 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
692 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
693 		sctp_audit_data[sctp_audit_indx][1] =
694 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
695 		sctp_audit_indx++;
696 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
697 			sctp_audit_indx = 0;
698 		}
699 	}
700 	if (tot_out != stcb->asoc.total_flight) {
701 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
702 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
703 		sctp_audit_indx++;
704 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
705 			sctp_audit_indx = 0;
706 		}
707 		rep = 1;
708 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
709 		    (int)stcb->asoc.total_flight);
710 		stcb->asoc.total_flight = tot_out;
711 	}
712 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
713 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
714 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
715 		sctp_audit_indx++;
716 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
717 			sctp_audit_indx = 0;
718 		}
719 		rep = 1;
720 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
721 
722 		stcb->asoc.total_flight_count = tot_book_cnt;
723 	}
724 	tot_out = 0;
725 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
726 		tot_out += lnet->flight_size;
727 	}
728 	if (tot_out != stcb->asoc.total_flight) {
729 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
730 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
731 		sctp_audit_indx++;
732 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
733 			sctp_audit_indx = 0;
734 		}
735 		rep = 1;
736 		SCTP_PRINTF("real flight:%d net total was %d\n",
737 		    stcb->asoc.total_flight, tot_out);
738 		/* now corrective action */
739 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
740 
741 			tot_out = 0;
742 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
743 				if ((chk->whoTo == lnet) &&
744 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
745 					tot_out += chk->book_size;
746 				}
747 			}
748 			if (lnet->flight_size != tot_out) {
749 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
750 				    (void *)lnet, lnet->flight_size,
751 				    tot_out);
752 				lnet->flight_size = tot_out;
753 			}
754 		}
755 	}
756 	if (rep) {
757 		sctp_print_audit_report();
758 	}
759 }
760 
761 void
762 sctp_audit_log(uint8_t ev, uint8_t fd)
763 {
764 
765 	sctp_audit_data[sctp_audit_indx][0] = ev;
766 	sctp_audit_data[sctp_audit_indx][1] = fd;
767 	sctp_audit_indx++;
768 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
769 		sctp_audit_indx = 0;
770 	}
771 }
772 
773 #endif
774 
775 /*
776  * The conversion from time to ticks and vice versa is done by rounding
777  * upwards. This way we can test in the code that the time is positive and
778  * know that it corresponds to a positive number of ticks.
779  */
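
/*
 * Worked example (illustrative, assuming hz = 100): sctp_msecs_to_ticks(25)
 * yields (25 * 100 + 999) / 1000 = 3 ticks (2.5 rounded up), and
 * sctp_ticks_to_msecs(3) yields (3 * 1000 + 99) / 100 = 30 msecs.
 */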
780 
781 uint32_t
782 sctp_msecs_to_ticks(uint32_t msecs)
783 {
784 	uint64_t temp;
785 	uint32_t ticks;
786 
787 	if (hz == 1000) {
788 		ticks = msecs;
789 	} else {
790 		temp = (((uint64_t)msecs * hz) + 999) / 1000;
791 		if (temp > UINT32_MAX) {
792 			ticks = UINT32_MAX;
793 		} else {
794 			ticks = (uint32_t)temp;
795 		}
796 	}
797 	return (ticks);
798 }
799 
800 uint32_t
801 sctp_ticks_to_msecs(uint32_t ticks)
802 {
803 	uint64_t temp;
804 	uint32_t msecs;
805 
806 	if (hz == 1000) {
807 		msecs = ticks;
808 	} else {
809 		temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
810 		if (temp > UINT32_MAX) {
811 			msecs = UINT32_MAX;
812 		} else {
813 			msecs = (uint32_t)temp;
814 		}
815 	}
816 	return (msecs);
817 }
818 
819 uint32_t
820 sctp_secs_to_ticks(uint32_t secs)
821 {
822 	uint64_t temp;
823 	uint32_t ticks;
824 
825 	temp = (uint64_t)secs * hz;
826 	if (temp > UINT32_MAX) {
827 		ticks = UINT32_MAX;
828 	} else {
829 		ticks = (uint32_t)temp;
830 	}
831 	return (ticks);
832 }
833 
834 uint32_t
835 sctp_ticks_to_secs(uint32_t ticks)
836 {
837 	uint64_t temp;
838 	uint32_t secs;
839 
840 	temp = ((uint64_t)ticks + (hz - 1)) / hz;
841 	if (temp > UINT32_MAX) {
842 		secs = UINT32_MAX;
843 	} else {
844 		secs = (uint32_t)temp;
845 	}
846 	return (secs);
847 }
848 
849 /*
850  * sctp_stop_timers_for_shutdown() should be called
851  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
852  * state to make sure that all timers are stopped.
853  */
854 void
855 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
856 {
857 	struct sctp_inpcb *inp;
858 	struct sctp_nets *net;
859 
860 	inp = stcb->sctp_ep;
861 
862 	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
863 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
864 	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
865 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
866 	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
867 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
868 	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
869 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
870 	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
871 		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
872 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
873 		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
874 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
875 	}
876 }
877 
878 void
879 sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
880 {
881 	struct sctp_inpcb *inp;
882 	struct sctp_nets *net;
883 
884 	inp = stcb->sctp_ep;
885 	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
886 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
887 	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
888 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
889 	if (stop_assoc_kill_timer) {
890 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
891 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
892 	}
893 	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
894 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
895 	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
896 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
897 	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
898 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
899 	/* Mobility adaptation */
900 	sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
901 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
902 	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
903 		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
904 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
905 		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
906 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
907 		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
908 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
909 		sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
910 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
911 		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
912 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
913 		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
914 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
915 		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
916 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
917 	}
918 }
919 
920 /*
921  * A list of sizes based on typical MTUs, used only if the next hop size is
922  * not returned. These values MUST be multiples of 4 and MUST be ordered.
923  */
924 static uint32_t sctp_mtu_sizes[] = {
925 	68,
926 	296,
927 	508,
928 	512,
929 	544,
930 	576,
931 	1004,
932 	1492,
933 	1500,
934 	1536,
935 	2000,
936 	2048,
937 	4352,
938 	4464,
939 	8168,
940 	17912,
941 	32000,
942 	65532
943 };
944 
945 /*
946  * Return the largest MTU in sctp_mtu_sizes smaller than val.
947  * If val is not larger than the minimum, just return the largest
948  * multiple of 4 smaller than or equal to val.
949  * Ensure that the result is a multiple of 4.
950  */
951 uint32_t
952 sctp_get_prev_mtu(uint32_t val)
953 {
954 	uint32_t i;
955 
956 	val &= 0xfffffffc;
957 	if (val <= sctp_mtu_sizes[0]) {
958 		return (val);
959 	}
960 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
961 		if (val <= sctp_mtu_sizes[i]) {
962 			break;
963 		}
964 	}
965 	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
966 	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
967 	return (sctp_mtu_sizes[i - 1]);
968 }
969 
970 /*
971  * Return the smallest MTU in sctp_mtu_sizes larger than val.
972  * If val is larger than or equal to the maximum, just return the largest
973  * multiple of 4 smaller than or equal to val.
974  * Ensure that the result is a multiple of 4.
975  */
976 uint32_t
977 sctp_get_next_mtu(uint32_t val)
978 {
979 	/* select another MTU that is just bigger than this one */
980 	uint32_t i;
981 
982 	val &= 0xfffffffc;
983 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
984 		if (val < sctp_mtu_sizes[i]) {
985 			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
986 			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
987 			return (sctp_mtu_sizes[i]);
988 		}
989 	}
990 	return (val);
991 }
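
/*
 * Illustrative examples (not from the original source): with the table
 * above, sctp_get_prev_mtu(1500) returns 1492 and sctp_get_next_mtu(1500)
 * returns 1536; for a value at or below the smallest entry, e.g.
 * sctp_get_prev_mtu(60), the (already 4-byte aligned) value 60 itself is
 * returned.
 */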
992 
993 void
994 sctp_fill_random_store(struct sctp_pcb *m)
995 {
996 	/*
997 	 * Here we use MD5/SHA-1 to hash our good random numbers and
998 	 * our counter. The result becomes our good random numbers and we
999 	 * then set up to give these out. Note that we do no locking to
1000 	 * protect this. This is OK, since if competing folks call this we
1001 	 * will get more gobbledygook in the random store, which is what we
1002 	 * want. There is a danger that two callers will use the same random
1003 	 * numbers, but that's OK too since that is random as well :->
1004 	 */
1005 	m->store_at = 0;
1006 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
1007 	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
1008 	    sizeof(m->random_counter), (uint8_t *)m->random_store);
1009 	m->random_counter++;
1010 }
1011 
1012 uint32_t
1013 sctp_select_initial_TSN(struct sctp_pcb *inp)
1014 {
1015 	/*
1016 	 * A true implementation should use a random selection process to get
1017 	 * the initial stream sequence number, using RFC 1750 as a good
1018 	 * guideline.
1019 	 */
1020 	uint32_t x, *xp;
1021 	uint8_t *p;
1022 	int store_at, new_store;
1023 
1024 	if (inp->initial_sequence_debug != 0) {
1025 		uint32_t ret;
1026 
1027 		ret = inp->initial_sequence_debug;
1028 		inp->initial_sequence_debug++;
1029 		return (ret);
1030 	}
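
	/*
	 * Reserve a 4-byte slot in the random store without taking a lock:
	 * the compare-and-swap on store_at either claims [store_at,
	 * store_at + 4) for this caller, or fails and retries if another
	 * thread advanced the index first.  When the index wraps to 0, the
	 * store is refilled below via sctp_fill_random_store().
	 */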
1031 retry:
1032 	store_at = inp->store_at;
1033 	new_store = store_at + sizeof(uint32_t);
1034 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
1035 		new_store = 0;
1036 	}
1037 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
1038 		goto retry;
1039 	}
1040 	if (new_store == 0) {
1041 		/* Refill the random store */
1042 		sctp_fill_random_store(inp);
1043 	}
1044 	p = &inp->random_store[store_at];
1045 	xp = (uint32_t *)p;
1046 	x = *xp;
1047 	return (x);
1048 }
1049 
1050 uint32_t
1051 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
1052 {
1053 	uint32_t x;
1054 	struct timeval now;
1055 
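	/*
	 * Keep drawing candidate tags from the random store until we get a
	 * non-zero value that, when a check is requested, sctp_is_vtag_good()
	 * reports as not colliding with an existing association for this
	 * port pair.
	 */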
1056 	if (check) {
1057 		(void)SCTP_GETTIME_TIMEVAL(&now);
1058 	}
1059 	for (;;) {
1060 		x = sctp_select_initial_TSN(&inp->sctp_ep);
1061 		if (x == 0) {
1062 			/* we never use 0 */
1063 			continue;
1064 		}
1065 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
1066 			break;
1067 		}
1068 	}
1069 	return (x);
1070 }
1071 
1072 int32_t
1073 sctp_map_assoc_state(int kernel_state)
1074 {
1075 	int32_t user_state;
1076 
1077 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
1078 		user_state = SCTP_CLOSED;
1079 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
1080 		user_state = SCTP_SHUTDOWN_PENDING;
1081 	} else {
1082 		switch (kernel_state & SCTP_STATE_MASK) {
1083 		case SCTP_STATE_EMPTY:
1084 			user_state = SCTP_CLOSED;
1085 			break;
1086 		case SCTP_STATE_INUSE:
1087 			user_state = SCTP_CLOSED;
1088 			break;
1089 		case SCTP_STATE_COOKIE_WAIT:
1090 			user_state = SCTP_COOKIE_WAIT;
1091 			break;
1092 		case SCTP_STATE_COOKIE_ECHOED:
1093 			user_state = SCTP_COOKIE_ECHOED;
1094 			break;
1095 		case SCTP_STATE_OPEN:
1096 			user_state = SCTP_ESTABLISHED;
1097 			break;
1098 		case SCTP_STATE_SHUTDOWN_SENT:
1099 			user_state = SCTP_SHUTDOWN_SENT;
1100 			break;
1101 		case SCTP_STATE_SHUTDOWN_RECEIVED:
1102 			user_state = SCTP_SHUTDOWN_RECEIVED;
1103 			break;
1104 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
1105 			user_state = SCTP_SHUTDOWN_ACK_SENT;
1106 			break;
1107 		default:
1108 			user_state = SCTP_CLOSED;
1109 			break;
1110 		}
1111 	}
1112 	return (user_state);
1113 }
1114 
1115 int
1116 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1117     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
1118 {
1119 	struct sctp_association *asoc;
1120 
1121 	/*
1122 	 * Anything set to zero is taken care of by the allocation routine's
1123 	 * bzero
1124 	 */
1125 
1126 	/*
1127 	 * Up front, select what scoping to apply on addresses I tell my peer.
1128 	 * Not sure what to do with these right now; we will need to come up
1129 	 * with a way to set them. We may need to pass them through from the
1130 	 * caller in the sctp_aloc_assoc() function.
1131 	 */
1132 	int i;
1133 #if defined(SCTP_DETAILED_STR_STATS)
1134 	int j;
1135 #endif
1136 
1137 	asoc = &stcb->asoc;
1138 	/* init all variables to a known value. */
1139 	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
1140 	asoc->max_burst = inp->sctp_ep.max_burst;
1141 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
1142 	asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
1143 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
1144 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
1145 	asoc->ecn_supported = inp->ecn_supported;
1146 	asoc->prsctp_supported = inp->prsctp_supported;
1147 	asoc->auth_supported = inp->auth_supported;
1148 	asoc->asconf_supported = inp->asconf_supported;
1149 	asoc->reconfig_supported = inp->reconfig_supported;
1150 	asoc->nrsack_supported = inp->nrsack_supported;
1151 	asoc->pktdrop_supported = inp->pktdrop_supported;
1152 	asoc->idata_supported = inp->idata_supported;
1153 	asoc->sctp_cmt_pf = (uint8_t)0;
1154 	asoc->sctp_frag_point = inp->sctp_frag_point;
1155 	asoc->sctp_features = inp->sctp_features;
1156 	asoc->default_dscp = inp->sctp_ep.default_dscp;
1157 	asoc->max_cwnd = inp->max_cwnd;
1158 #ifdef INET6
1159 	if (inp->sctp_ep.default_flowlabel) {
1160 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
1161 	} else {
1162 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
1163 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
1164 			asoc->default_flowlabel &= 0x000fffff;
1165 			asoc->default_flowlabel |= 0x80000000;
1166 		} else {
1167 			asoc->default_flowlabel = 0;
1168 		}
1169 	}
1170 #endif
1171 	asoc->sb_send_resv = 0;
1172 	if (override_tag) {
1173 		asoc->my_vtag = override_tag;
1174 	} else {
1175 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1176 	}
1177 	/* Get the nonce tags */
1178 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1179 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1180 	asoc->vrf_id = vrf_id;
1181 
1182 #ifdef SCTP_ASOCLOG_OF_TSNS
1183 	asoc->tsn_in_at = 0;
1184 	asoc->tsn_out_at = 0;
1185 	asoc->tsn_in_wrapped = 0;
1186 	asoc->tsn_out_wrapped = 0;
1187 	asoc->cumack_log_at = 0;
1188 	asoc->cumack_log_atsnt = 0;
1189 #endif
1190 #ifdef SCTP_FS_SPEC_LOG
1191 	asoc->fs_index = 0;
1192 #endif
1193 	asoc->refcnt = 0;
1194 	asoc->assoc_up_sent = 0;
1195 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1196 	    sctp_select_initial_TSN(&inp->sctp_ep);
1197 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1198 	/* we are optimistic here */
1199 	asoc->peer_supports_nat = 0;
1200 	asoc->sent_queue_retran_cnt = 0;
1201 
1202 	/* for CMT */
1203 	asoc->last_net_cmt_send_started = NULL;
1204 
1205 	/* This will need to be adjusted */
1206 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1207 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1208 	asoc->asconf_seq_in = asoc->last_acked_seq;
1209 
1210 	/* here we are different, we hold the next one we expect */
1211 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1212 
1213 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1214 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1215 
1216 	asoc->default_mtu = inp->sctp_ep.default_mtu;
1217 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1218 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1219 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1220 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1221 	asoc->free_chunk_cnt = 0;
1222 
1223 	asoc->iam_blocking = 0;
1224 	asoc->context = inp->sctp_context;
1225 	asoc->local_strreset_support = inp->local_strreset_support;
1226 	asoc->def_send = inp->def_send;
1227 	asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1228 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1229 	asoc->pr_sctp_cnt = 0;
1230 	asoc->total_output_queue_size = 0;
1231 
1232 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1233 		asoc->scope.ipv6_addr_legal = 1;
1234 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1235 			asoc->scope.ipv4_addr_legal = 1;
1236 		} else {
1237 			asoc->scope.ipv4_addr_legal = 0;
1238 		}
1239 	} else {
1240 		asoc->scope.ipv6_addr_legal = 0;
1241 		asoc->scope.ipv4_addr_legal = 1;
1242 	}
1243 
1244 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1245 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1246 
1247 	asoc->smallest_mtu = inp->sctp_frag_point;
1248 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1249 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1250 
1251 	asoc->stream_locked_on = 0;
1252 	asoc->ecn_echo_cnt_onq = 0;
1253 	asoc->stream_locked = 0;
1254 
1255 	asoc->send_sack = 1;
1256 
1257 	LIST_INIT(&asoc->sctp_restricted_addrs);
1258 
1259 	TAILQ_INIT(&asoc->nets);
1260 	TAILQ_INIT(&asoc->pending_reply_queue);
1261 	TAILQ_INIT(&asoc->asconf_ack_sent);
1262 	/* Set up to fill the hb random cache at the first HB */
1263 	asoc->hb_random_idx = 4;
1264 
1265 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1266 
1267 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1268 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1269 
1270 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1271 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1272 
1273 	/*
1274 	 * Now the stream parameters, here we allocate space for all streams
1275 	 * that we request by default.
1276 	 */
1277 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1278 	    o_strms;
1279 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1280 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1281 	    SCTP_M_STRMO);
1282 	if (asoc->strmout == NULL) {
1283 		/* big trouble no memory */
1284 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1285 		return (ENOMEM);
1286 	}
1287 	for (i = 0; i < asoc->streamoutcnt; i++) {
1288 		/*
1289 		 * The inbound side must be set to 0xffff. Also NOTE: when we get
1290 		 * the INIT-ACK back (for the INIT sender) we MUST reduce the
1291 		 * count (streamoutcnt), but first check whether we sent to any of
1292 		 * the upper streams that were dropped (if some were). Those
1293 		 * that were dropped must be reported to the upper layer as
1294 		 * failed to send.
1295 		 */
1296 		asoc->strmout[i].next_mid_ordered = 0;
1297 		asoc->strmout[i].next_mid_unordered = 0;
1298 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1299 		asoc->strmout[i].chunks_on_queues = 0;
1300 #if defined(SCTP_DETAILED_STR_STATS)
1301 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1302 			asoc->strmout[i].abandoned_sent[j] = 0;
1303 			asoc->strmout[i].abandoned_unsent[j] = 0;
1304 		}
1305 #else
1306 		asoc->strmout[i].abandoned_sent[0] = 0;
1307 		asoc->strmout[i].abandoned_unsent[0] = 0;
1308 #endif
1309 		asoc->strmout[i].sid = i;
1310 		asoc->strmout[i].last_msg_incomplete = 0;
1311 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1312 		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1313 	}
1314 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1315 
1316 	/* Now the mapping array */
1317 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1318 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1319 	    SCTP_M_MAP);
1320 	if (asoc->mapping_array == NULL) {
1321 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1322 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1323 		return (ENOMEM);
1324 	}
1325 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1326 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1327 	    SCTP_M_MAP);
1328 	if (asoc->nr_mapping_array == NULL) {
1329 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1330 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1331 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1332 		return (ENOMEM);
1333 	}
1334 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1335 
1336 	/* Now the init of the other outqueues */
1337 	TAILQ_INIT(&asoc->free_chunks);
1338 	TAILQ_INIT(&asoc->control_send_queue);
1339 	TAILQ_INIT(&asoc->asconf_send_queue);
1340 	TAILQ_INIT(&asoc->send_queue);
1341 	TAILQ_INIT(&asoc->sent_queue);
1342 	TAILQ_INIT(&asoc->resetHead);
1343 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1344 	TAILQ_INIT(&asoc->asconf_queue);
1345 	/* authentication fields */
1346 	asoc->authinfo.random = NULL;
1347 	asoc->authinfo.active_keyid = 0;
1348 	asoc->authinfo.assoc_key = NULL;
1349 	asoc->authinfo.assoc_keyid = 0;
1350 	asoc->authinfo.recv_key = NULL;
1351 	asoc->authinfo.recv_keyid = 0;
1352 	LIST_INIT(&asoc->shared_keys);
1353 	asoc->marked_retrans = 0;
1354 	asoc->port = inp->sctp_ep.port;
1355 	asoc->timoinit = 0;
1356 	asoc->timodata = 0;
1357 	asoc->timosack = 0;
1358 	asoc->timoshutdown = 0;
1359 	asoc->timoheartbeat = 0;
1360 	asoc->timocookie = 0;
1361 	asoc->timoshutdownack = 0;
1362 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1363 	asoc->discontinuity_time = asoc->start_time;
1364 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1365 		asoc->abandoned_unsent[i] = 0;
1366 		asoc->abandoned_sent[i] = 0;
1367 	}
1368 	/*
1369 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1370 	 * freed later when the association is freed.}
1371 	 */
1372 	return (0);
1373 }
1374 
1375 void
1376 sctp_print_mapping_array(struct sctp_association *asoc)
1377 {
1378 	unsigned int i, limit;
1379 
1380 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1381 	    asoc->mapping_array_size,
1382 	    asoc->mapping_array_base_tsn,
1383 	    asoc->cumulative_tsn,
1384 	    asoc->highest_tsn_inside_map,
1385 	    asoc->highest_tsn_inside_nr_map);
1386 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1387 		if (asoc->mapping_array[limit - 1] != 0) {
1388 			break;
1389 		}
1390 	}
1391 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1392 	for (i = 0; i < limit; i++) {
1393 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1394 	}
1395 	if (limit % 16)
1396 		SCTP_PRINTF("\n");
1397 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1398 		if (asoc->nr_mapping_array[limit - 1]) {
1399 			break;
1400 		}
1401 	}
1402 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1403 	for (i = 0; i < limit; i++) {
1404 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1405 	}
1406 	if (limit % 16)
1407 		SCTP_PRINTF("\n");
1408 }
1409 
1410 int
1411 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1412 {
1413 	/* mapping array needs to grow */
1414 	uint8_t *new_array1, *new_array2;
1415 	uint32_t new_size;
1416 
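	/*
	 * The mapping array tracks one bit per TSN, so (needed + 7) / 8
	 * rounds the requested number of TSNs up to whole bytes;
	 * SCTP_MAPPING_ARRAY_INCR adds extra headroom so the array does not
	 * have to grow again immediately.
	 */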
1417 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1418 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1419 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1420 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1421 		/* can't get more, forget it */
1422 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1423 		if (new_array1) {
1424 			SCTP_FREE(new_array1, SCTP_M_MAP);
1425 		}
1426 		if (new_array2) {
1427 			SCTP_FREE(new_array2, SCTP_M_MAP);
1428 		}
1429 		return (-1);
1430 	}
1431 	memset(new_array1, 0, new_size);
1432 	memset(new_array2, 0, new_size);
1433 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1434 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1435 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1436 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1437 	asoc->mapping_array = new_array1;
1438 	asoc->nr_mapping_array = new_array2;
1439 	asoc->mapping_array_size = new_size;
1440 	return (0);
1441 }
1442 
1443 
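/*
 * Run a single iterator request: walk the endpoint list starting at it->inp,
 * skipping endpoints whose flags/features do not match, and for each matching
 * endpoint invoke the per-endpoint and per-association callbacks.  The INP
 * info read lock and the iterator lock are held throughout, with a periodic
 * release/reacquire after SCTP_ITERATOR_MAX_AT_ONCE associations so that
 * other threads can make progress.
 */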
1444 static void
1445 sctp_iterator_work(struct sctp_iterator *it)
1446 {
1447 	struct epoch_tracker et;
1448 	struct sctp_inpcb *tinp;
1449 	int iteration_count = 0;
1450 	int inp_skip = 0;
1451 	int first_in = 1;
1452 
1453 	NET_EPOCH_ENTER(et);
1454 	SCTP_INP_INFO_RLOCK();
1455 	SCTP_ITERATOR_LOCK();
1456 	sctp_it_ctl.cur_it = it;
1457 	if (it->inp) {
1458 		SCTP_INP_RLOCK(it->inp);
1459 		SCTP_INP_DECR_REF(it->inp);
1460 	}
1461 	if (it->inp == NULL) {
1462 		/* iterator is complete */
1463 done_with_iterator:
1464 		sctp_it_ctl.cur_it = NULL;
1465 		SCTP_ITERATOR_UNLOCK();
1466 		SCTP_INP_INFO_RUNLOCK();
1467 		if (it->function_atend != NULL) {
1468 			(*it->function_atend) (it->pointer, it->val);
1469 		}
1470 		SCTP_FREE(it, SCTP_M_ITER);
1471 		NET_EPOCH_EXIT(et);
1472 		return;
1473 	}
1474 select_a_new_ep:
1475 	if (first_in) {
1476 		first_in = 0;
1477 	} else {
1478 		SCTP_INP_RLOCK(it->inp);
1479 	}
1480 	while (((it->pcb_flags) &&
1481 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1482 	    ((it->pcb_features) &&
1483 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1484 		/* endpoint flags or features don't match, so keep looking */
1485 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1486 			SCTP_INP_RUNLOCK(it->inp);
1487 			goto done_with_iterator;
1488 		}
1489 		tinp = it->inp;
1490 		it->inp = LIST_NEXT(it->inp, sctp_list);
1491 		it->stcb = NULL;
1492 		SCTP_INP_RUNLOCK(tinp);
1493 		if (it->inp == NULL) {
1494 			goto done_with_iterator;
1495 		}
1496 		SCTP_INP_RLOCK(it->inp);
1497 	}
1498 	/* now go through each assoc which is in the desired state */
1499 	if (it->done_current_ep == 0) {
1500 		if (it->function_inp != NULL)
1501 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1502 		it->done_current_ep = 1;
1503 	}
1504 	if (it->stcb == NULL) {
1505 		/* run the per instance function */
1506 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1507 	}
1508 	if ((inp_skip) || it->stcb == NULL) {
1509 		if (it->function_inp_end != NULL) {
1510 			inp_skip = (*it->function_inp_end) (it->inp,
1511 			    it->pointer,
1512 			    it->val);
1513 		}
1514 		SCTP_INP_RUNLOCK(it->inp);
1515 		goto no_stcb;
1516 	}
1517 	while (it->stcb) {
1518 		SCTP_TCB_LOCK(it->stcb);
1519 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1520 			/* not in the right state... keep looking */
1521 			SCTP_TCB_UNLOCK(it->stcb);
1522 			goto next_assoc;
1523 		}
1524 		/* see if we have limited out the iterator loop */
1525 		iteration_count++;
1526 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1527 			/* Pause to let others grab the lock */
1528 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1529 			SCTP_TCB_UNLOCK(it->stcb);
1530 			SCTP_INP_INCR_REF(it->inp);
1531 			SCTP_INP_RUNLOCK(it->inp);
1532 			SCTP_ITERATOR_UNLOCK();
1533 			SCTP_INP_INFO_RUNLOCK();
1534 			SCTP_INP_INFO_RLOCK();
1535 			SCTP_ITERATOR_LOCK();
1536 			if (sctp_it_ctl.iterator_flags) {
1537 				/* We won't be staying here */
1538 				SCTP_INP_DECR_REF(it->inp);
1539 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1540 				if (sctp_it_ctl.iterator_flags &
1541 				    SCTP_ITERATOR_STOP_CUR_IT) {
1542 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1543 					goto done_with_iterator;
1544 				}
1545 				if (sctp_it_ctl.iterator_flags &
1546 				    SCTP_ITERATOR_STOP_CUR_INP) {
1547 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1548 					goto no_stcb;
1549 				}
1550 				/* If we reach here huh? */
1551 				SCTP_PRINTF("Unknown it ctl flag %x\n",
1552 				    sctp_it_ctl.iterator_flags);
1553 				sctp_it_ctl.iterator_flags = 0;
1554 			}
1555 			SCTP_INP_RLOCK(it->inp);
1556 			SCTP_INP_DECR_REF(it->inp);
1557 			SCTP_TCB_LOCK(it->stcb);
1558 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1559 			iteration_count = 0;
1560 		}
1561 		KASSERT(it->inp == it->stcb->sctp_ep,
1562 		    ("%s: stcb %p does not belong to inp %p, but inp %p",
1563 		    __func__, it->stcb, it->inp, it->stcb->sctp_ep));
1564 
1565 		/* run function on this one */
1566 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1567 
1568 		/*
1569 		 * we lie here, it really needs to have its own type but
1570 		 * first I must verify that this won't affect things :-0
1571 		 */
1572 		if (it->no_chunk_output == 0)
1573 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1574 
1575 		SCTP_TCB_UNLOCK(it->stcb);
1576 next_assoc:
1577 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1578 		if (it->stcb == NULL) {
1579 			/* Run last function */
1580 			if (it->function_inp_end != NULL) {
1581 				inp_skip = (*it->function_inp_end) (it->inp,
1582 				    it->pointer,
1583 				    it->val);
1584 			}
1585 		}
1586 	}
1587 	SCTP_INP_RUNLOCK(it->inp);
1588 no_stcb:
1589 	/* done with all assocs on this endpoint, move on to next endpoint */
1590 	it->done_current_ep = 0;
1591 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1592 		it->inp = NULL;
1593 	} else {
1594 		it->inp = LIST_NEXT(it->inp, sctp_list);
1595 	}
1596 	it->stcb = NULL;
1597 	if (it->inp == NULL) {
1598 		goto done_with_iterator;
1599 	}
1600 	goto select_a_new_ep;
1601 }
1602 
1603 void
1604 sctp_iterator_worker(void)
1605 {
1606 	struct sctp_iterator *it;
1607 
1608 	/* This function is called with the WQ lock in place */
1609 	sctp_it_ctl.iterator_running = 1;
1610 	while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
1611 		/* now lets work on this one */
1612 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1613 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1614 		CURVNET_SET(it->vn);
1615 		sctp_iterator_work(it);
1616 		CURVNET_RESTORE();
1617 		SCTP_IPI_ITERATOR_WQ_LOCK();
1618 		/* sa_ignore FREED_MEMORY */
1619 	}
1620 	sctp_it_ctl.iterator_running = 0;
1621 	return;
1622 }
1623 
1624 
1625 static void
1626 sctp_handle_addr_wq(void)
1627 {
1628 	/* deal with the ADDR wq from the rtsock calls */
1629 	struct sctp_laddr *wi, *nwi;
1630 	struct sctp_asconf_iterator *asc;
1631 
1632 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1633 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1634 	if (asc == NULL) {
1635 		/* Try later, no memory */
1636 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1637 		    (struct sctp_inpcb *)NULL,
1638 		    (struct sctp_tcb *)NULL,
1639 		    (struct sctp_nets *)NULL);
1640 		return;
1641 	}
1642 	LIST_INIT(&asc->list_of_work);
1643 	asc->cnt = 0;
1644 
1645 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1646 		LIST_REMOVE(wi, sctp_nxt_addr);
1647 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1648 		asc->cnt++;
1649 	}
1650 
1651 	if (asc->cnt == 0) {
1652 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1653 	} else {
1654 		int ret;
1655 
1656 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1657 		    sctp_asconf_iterator_stcb,
1658 		    NULL,	/* No ep end for boundall */
1659 		    SCTP_PCB_FLAGS_BOUNDALL,
1660 		    SCTP_PCB_ANY_FEATURES,
1661 		    SCTP_ASOC_ANY_STATE,
1662 		    (void *)asc, 0,
1663 		    sctp_asconf_iterator_end, NULL, 0);
1664 		if (ret) {
1665 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1666 			/*
1667 			 * Freeing if we are stopping or put back on the
1668 			 * addr_wq.
1669 			 */
1670 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1671 				sctp_asconf_iterator_end(asc, 0);
1672 			} else {
1673 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1674 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1675 				}
1676 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1677 			}
1678 		}
1679 	}
1680 }
1681 
1682 /*-
1683  * The following table shows which pointers for the inp, stcb, or net are
1684  * stored for each timer after it was started.
1685  *
1686  *|Name                         |Timer                        |inp |stcb|net |
1687  *|-----------------------------|-----------------------------|----|----|----|
1688  *|SCTP_TIMER_TYPE_SEND         |net->rxt_timer               |Yes |Yes |Yes |
1689  *|SCTP_TIMER_TYPE_INIT         |net->rxt_timer               |Yes |Yes |Yes |
1690  *|SCTP_TIMER_TYPE_RECV         |stcb->asoc.dack_timer        |Yes |Yes |No  |
1691  *|SCTP_TIMER_TYPE_SHUTDOWN     |net->rxt_timer               |Yes |Yes |Yes |
1692  *|SCTP_TIMER_TYPE_HEARTBEAT    |net->hb_timer                |Yes |Yes |Yes |
1693  *|SCTP_TIMER_TYPE_COOKIE       |net->rxt_timer               |Yes |Yes |Yes |
1694  *|SCTP_TIMER_TYPE_NEWCOOKIE    |inp->sctp_ep.signature_change|Yes |No  |No  |
1695  *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer              |Yes |Yes |Yes |
1696  *|SCTP_TIMER_TYPE_SHUTDOWNACK  |net->rxt_timer               |Yes |Yes |Yes |
1697  *|SCTP_TIMER_TYPE_ASCONF       |stcb->asoc.asconf_timer      |Yes |Yes |Yes |
1698  *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer  |Yes |Yes |No  |
1699  *|SCTP_TIMER_TYPE_AUTOCLOSE    |stcb->asoc.autoclose_timer   |Yes |Yes |No  |
1700  *|SCTP_TIMER_TYPE_STRRESET     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
1701  *|SCTP_TIMER_TYPE_INPKILL      |inp->sctp_ep.signature_change|Yes |No  |No  |
1702  *|SCTP_TIMER_TYPE_ASOCKILL     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
1703  *|SCTP_TIMER_TYPE_ADDR_WQ      |SCTP_BASE_INFO(addr_wq_timer)|No  |No  |No  |
1704  *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No  |
1705  */
1706 
1707 void
1708 sctp_timeout_handler(void *t)
1709 {
1710 	struct epoch_tracker et;
1711 	struct timeval tv;
1712 	struct sctp_inpcb *inp;
1713 	struct sctp_tcb *stcb;
1714 	struct sctp_nets *net;
1715 	struct sctp_timer *tmr;
1716 	struct mbuf *op_err;
1717 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1718 	struct socket *so;
1719 #endif
1720 	int did_output;
1721 	int type;
1722 	int i, secret;
1723 
1724 	tmr = (struct sctp_timer *)t;
1725 	inp = (struct sctp_inpcb *)tmr->ep;
1726 	stcb = (struct sctp_tcb *)tmr->tcb;
1727 	net = (struct sctp_nets *)tmr->net;
1728 	CURVNET_SET((struct vnet *)tmr->vnet);
1729 	did_output = 1;
1730 
1731 #ifdef SCTP_AUDITING_ENABLED
1732 	sctp_audit_log(0xF0, (uint8_t)tmr->type);
1733 	sctp_auditing(3, inp, stcb, net);
1734 #endif
1735 
1736 	/* sanity checks... */
1737 	KASSERT(tmr->self == tmr,
1738 	    ("sctp_timeout_handler: tmr->self corrupted"));
1739 	KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type),
1740 	    ("sctp_timeout_handler: invalid timer type %d", tmr->type));
1741 	type = tmr->type;
1742 	KASSERT(stcb == NULL || stcb->sctp_ep == inp,
1743 	    ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p",
1744 	    type, stcb, stcb->sctp_ep));
1745 	if (inp) {
1746 		SCTP_INP_INCR_REF(inp);
1747 	}
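	/*
	 * The 0xa0xx values below are breadcrumbs recording how far this
	 * handler progressed before the actual timer type is stored in
	 * stopped_from further down.
	 */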
1748 	tmr->stopped_from = 0xa001;
1749 	if (stcb) {
1750 		atomic_add_int(&stcb->asoc.refcnt, 1);
1751 		if (stcb->asoc.state == 0) {
1752 			atomic_add_int(&stcb->asoc.refcnt, -1);
1753 			if (inp) {
1754 				SCTP_INP_DECR_REF(inp);
1755 			}
1756 			SCTPDBG(SCTP_DEBUG_TIMER2,
1757 			    "Timer type %d handler exiting due to CLOSED association.\n",
1758 			    type);
1759 			CURVNET_RESTORE();
1760 			return;
1761 		}
1762 	}
1763 	tmr->stopped_from = 0xa002;
1764 	SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type);
1765 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1766 		if (inp) {
1767 			SCTP_INP_DECR_REF(inp);
1768 		}
1769 		if (stcb) {
1770 			atomic_add_int(&stcb->asoc.refcnt, -1);
1771 		}
1772 		SCTPDBG(SCTP_DEBUG_TIMER2,
1773 		    "Timer type %d handler exiting due to not being active.\n",
1774 		    type);
1775 		CURVNET_RESTORE();
1776 		return;
1777 	}
1778 
1779 	tmr->stopped_from = 0xa003;
1780 	if (stcb) {
1781 		SCTP_TCB_LOCK(stcb);
1782 		atomic_add_int(&stcb->asoc.refcnt, -1);
1783 		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1784 		    ((stcb->asoc.state == 0) ||
1785 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1786 			SCTP_TCB_UNLOCK(stcb);
1787 			if (inp) {
1788 				SCTP_INP_DECR_REF(inp);
1789 			}
1790 			SCTPDBG(SCTP_DEBUG_TIMER2,
1791 			    "Timer type %d handler exiting due to CLOSED association.\n",
1792 			    type);
1793 			CURVNET_RESTORE();
1794 			return;
1795 		}
1796 	} else if (inp != NULL) {
1797 		SCTP_INP_WLOCK(inp);
1798 	} else {
1799 		SCTP_WQ_ADDR_LOCK();
1800 	}
1801 
1802 	/* Record in stopped_from which timeout occurred. */
1803 	tmr->stopped_from = type;
1804 	NET_EPOCH_ENTER(et);
1805 	/* mark as being serviced now */
1806 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1807 		/*
1808 		 * Callout has been rescheduled.
1809 		 */
1810 		goto get_out;
1811 	}
1812 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1813 		/*
1814 		 * Not active, so no action.
1815 		 */
1816 		goto get_out;
1817 	}
1818 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1819 
1820 	/* call the handler for the appropriate timer type */
1821 	switch (type) {
1822 	case SCTP_TIMER_TYPE_SEND:
1823 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1824 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1825 		    type, inp, stcb, net));
1826 		SCTP_STAT_INCR(sctps_timodata);
1827 		stcb->asoc.timodata++;
1828 		stcb->asoc.num_send_timers_up--;
1829 		if (stcb->asoc.num_send_timers_up < 0) {
1830 			stcb->asoc.num_send_timers_up = 0;
1831 		}
1832 		SCTP_TCB_LOCK_ASSERT(stcb);
1833 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1834 			/* no need to unlock on tcb, it's gone */
1835 
1836 			goto out_decr;
1837 		}
1838 		SCTP_TCB_LOCK_ASSERT(stcb);
1839 #ifdef SCTP_AUDITING_ENABLED
1840 		sctp_auditing(4, inp, stcb, net);
1841 #endif
1842 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1843 		if ((stcb->asoc.num_send_timers_up == 0) &&
1844 		    (stcb->asoc.sent_queue_cnt > 0)) {
1845 			struct sctp_tmit_chunk *chk;
1846 
1847 			/*
1848 			 * Safeguard. If there are chunks on the sent queue but
1849 			 * no timers running, something is wrong, so we start a
1850 			 * timer on the first chunk on the sent queue that has a
1851 			 * destination, on whatever net it is sent to.
1852 			 */
1853 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
1854 				if (chk->whoTo != NULL) {
1855 					break;
1856 				}
1857 			}
1858 			if (chk != NULL) {
1859 				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
1860 			}
1861 		}
1862 		break;
1863 	case SCTP_TIMER_TYPE_INIT:
1864 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1865 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1866 		    type, inp, stcb, net));
1867 		SCTP_STAT_INCR(sctps_timoinit);
1868 		stcb->asoc.timoinit++;
1869 		if (sctp_t1init_timer(inp, stcb, net)) {
1870 			/* no need to unlock on tcb, it's gone */
1871 			goto out_decr;
1872 		}
1873 		/* We do output but not here */
1874 		did_output = 0;
1875 		break;
1876 	case SCTP_TIMER_TYPE_RECV:
1877 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
1878 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1879 		    type, inp, stcb, net));
1880 		SCTP_STAT_INCR(sctps_timosack);
1881 		stcb->asoc.timosack++;
1882 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1883 #ifdef SCTP_AUDITING_ENABLED
1884 		sctp_auditing(4, inp, stcb, NULL);
1885 #endif
1886 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1887 		break;
1888 	case SCTP_TIMER_TYPE_SHUTDOWN:
1889 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1890 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1891 		    type, inp, stcb, net));
1892 		SCTP_STAT_INCR(sctps_timoshutdown);
1893 		stcb->asoc.timoshutdown++;
1894 		if (sctp_shutdown_timer(inp, stcb, net)) {
1895 			/* no need to unlock on tcb, it's gone */
1896 			goto out_decr;
1897 		}
1898 #ifdef SCTP_AUDITING_ENABLED
1899 		sctp_auditing(4, inp, stcb, net);
1900 #endif
1901 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1902 		break;
1903 	case SCTP_TIMER_TYPE_HEARTBEAT:
1904 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1905 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1906 		    type, inp, stcb, net));
1907 		SCTP_STAT_INCR(sctps_timoheartbeat);
1908 		stcb->asoc.timoheartbeat++;
1909 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1910 			/* no need to unlock on tcb, it's gone */
1911 			goto out_decr;
1912 		}
1913 #ifdef SCTP_AUDITING_ENABLED
1914 		sctp_auditing(4, inp, stcb, net);
1915 #endif
1916 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1917 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1918 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1919 		}
1920 		break;
1921 	case SCTP_TIMER_TYPE_COOKIE:
1922 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1923 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1924 		    type, inp, stcb, net));
1925 		SCTP_STAT_INCR(sctps_timocookie);
1926 		stcb->asoc.timocookie++;
1927 		if (sctp_cookie_timer(inp, stcb, net)) {
1928 			/* no need to unlock on tcb, it's gone */
1929 			goto out_decr;
1930 		}
1931 #ifdef SCTP_AUDITING_ENABLED
1932 		sctp_auditing(4, inp, stcb, net);
1933 #endif
1934 		/*
1935 		 * We consider the T3 and Cookie timers pretty much the same
1936 		 * with respect to the 'from' value passed to chunk_output.
1937 		 */
1938 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1939 		break;
1940 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1941 		KASSERT(inp != NULL && stcb == NULL && net == NULL,
1942 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1943 		    type, inp, stcb, net));
1944 		SCTP_STAT_INCR(sctps_timosecret);
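		/*
		 * Rotate to the next secret slot, refill it with fresh random
		 * material, and re-arm the timer for the next rotation.
		 */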
1945 		(void)SCTP_GETTIME_TIMEVAL(&tv);
1946 		inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1947 		inp->sctp_ep.last_secret_number =
1948 		    inp->sctp_ep.current_secret_number;
1949 		inp->sctp_ep.current_secret_number++;
1950 		if (inp->sctp_ep.current_secret_number >=
1951 		    SCTP_HOW_MANY_SECRETS) {
1952 			inp->sctp_ep.current_secret_number = 0;
1953 		}
1954 		secret = (int)inp->sctp_ep.current_secret_number;
1955 		for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1956 			inp->sctp_ep.secret_key[secret][i] =
1957 			    sctp_select_initial_TSN(&inp->sctp_ep);
1958 		}
1959 		sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);
1960 		did_output = 0;
1961 		break;
1962 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1963 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1964 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1965 		    type, inp, stcb, net));
1966 		SCTP_STAT_INCR(sctps_timopathmtu);
1967 		sctp_pathmtu_timer(inp, stcb, net);
1968 		did_output = 0;
1969 		break;
1970 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1971 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1972 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1973 		    type, inp, stcb, net));
1974 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1975 			/* no need to unlock on tcb, it's gone */
1976 			goto out_decr;
1977 		}
1978 		SCTP_STAT_INCR(sctps_timoshutdownack);
1979 		stcb->asoc.timoshutdownack++;
1980 #ifdef SCTP_AUDITING_ENABLED
1981 		sctp_auditing(4, inp, stcb, net);
1982 #endif
1983 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1984 		break;
1985 	case SCTP_TIMER_TYPE_ASCONF:
1986 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1987 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1988 		    type, inp, stcb, net));
1989 		SCTP_STAT_INCR(sctps_timoasconf);
1990 		if (sctp_asconf_timer(inp, stcb, net)) {
1991 			/* no need to unlock on tcb, it's gone */
1992 			goto out_decr;
1993 		}
1994 #ifdef SCTP_AUDITING_ENABLED
1995 		sctp_auditing(4, inp, stcb, net);
1996 #endif
1997 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1998 		break;
1999 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2000 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
2001 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2002 		    type, inp, stcb, net));
2003 		SCTP_STAT_INCR(sctps_timoshutdownguard);
2004 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
2005 		    "Shutdown guard timer expired");
2006 		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2007 		/* no need to unlock on tcb, it's gone */
2008 		goto out_decr;
2009 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2010 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
2011 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2012 		    type, inp, stcb, net));
2013 		SCTP_STAT_INCR(sctps_timoautoclose);
2014 		sctp_autoclose_timer(inp, stcb);
2015 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
2016 		did_output = 0;
2017 		break;
2018 	case SCTP_TIMER_TYPE_STRRESET:
2019 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
2020 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2021 		    type, inp, stcb, net));
2022 		SCTP_STAT_INCR(sctps_timostrmrst);
2023 		if (sctp_strreset_timer(inp, stcb)) {
2024 			/* no need to unlock on tcb, it's gone */
2025 			goto out_decr;
2026 		}
2027 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
2028 		break;
2029 	case SCTP_TIMER_TYPE_INPKILL:
2030 		KASSERT(inp != NULL && stcb == NULL && net == NULL,
2031 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2032 		    type, inp, stcb, net));
2033 		SCTP_STAT_INCR(sctps_timoinpkill);
2034 		/*
2035 		 * special case, take away our increment since WE are the
2036 		 * killer
2037 		 */
2038 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
2039 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
2040 		SCTP_INP_DECR_REF(inp);
2041 		SCTP_INP_WUNLOCK(inp);
2042 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
2043 		    SCTP_CALLED_FROM_INPKILL_TIMER);
2044 		inp = NULL;
2045 		goto out_no_decr;
2046 	case SCTP_TIMER_TYPE_ASOCKILL:
2047 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
2048 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2049 		    type, inp, stcb, net));
2050 		SCTP_STAT_INCR(sctps_timoassockill);
2051 		/* Can we free it yet? */
2052 		SCTP_INP_DECR_REF(inp);
2053 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
2054 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
2055 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2056 		so = SCTP_INP_SO(inp);
2057 		atomic_add_int(&stcb->asoc.refcnt, 1);
2058 		SCTP_TCB_UNLOCK(stcb);
2059 		SCTP_SOCKET_LOCK(so, 1);
2060 		SCTP_TCB_LOCK(stcb);
2061 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2062 #endif
2063 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
2064 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
2065 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2066 		SCTP_SOCKET_UNLOCK(so, 1);
2067 #endif
2068 		/*
2069 		 * sctp_free_assoc() always unlocks (or destroys) the tcb, so
2070 		 * prevent a duplicate unlock or an unlock of a freed mutex.
2071 		 */
2072 		stcb = NULL;
2073 		goto out_no_decr;
2074 	case SCTP_TIMER_TYPE_ADDR_WQ:
2075 		KASSERT(inp == NULL && stcb == NULL && net == NULL,
2076 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2077 		    type, inp, stcb, net));
2078 		sctp_handle_addr_wq();
2079 		break;
2080 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2081 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
2082 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2083 		    type, inp, stcb, net));
2084 		SCTP_STAT_INCR(sctps_timodelprim);
2085 		sctp_delete_prim_timer(inp, stcb);
2086 		break;
2087 	default:
2088 #ifdef INVARIANTS
2089 		panic("Unknown timer type %d", type);
2090 #else
2091 		goto get_out;
2092 #endif
2093 	}
2094 #ifdef SCTP_AUDITING_ENABLED
2095 	sctp_audit_log(0xF1, (uint8_t)type);
2096 	if (inp)
2097 		sctp_auditing(5, inp, stcb, net);
2098 #endif
2099 	if ((did_output) && stcb) {
2100 		/*
2101 		 * Now we need to clean up the control chunk chain if an
2102 		 * ECNE is on it. It must be marked as UNSENT again so the next
2103 		 * call will continue to send it until we get a CWR to remove
2104 		 * it. It is, however, unlikely that we will find an ECN echo
2105 		 * on the chain.
2106 		 */
2107 		sctp_fix_ecn_echo(&stcb->asoc);
2108 	}
2109 get_out:
2110 	if (stcb) {
2111 		SCTP_TCB_UNLOCK(stcb);
2112 	} else if (inp != NULL) {
2113 		SCTP_INP_WUNLOCK(inp);
2114 	} else {
2115 		SCTP_WQ_ADDR_UNLOCK();
2116 	}
2117 
2118 out_decr:
2119 	if (inp) {
2120 		SCTP_INP_DECR_REF(inp);
2121 	}
2122 
2123 out_no_decr:
2124 	SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type);
2125 	CURVNET_RESTORE();
2126 	NET_EPOCH_EXIT(et);
2127 }
2128 
2129 /*-
2130  * The following table shows which parameters must be provided
2131  * when calling sctp_timer_start(). For parameters not being
2132  * provided, NULL must be used.
2133  *
2134  * |Name                         |inp |stcb|net |
2135  * |-----------------------------|----|----|----|
2136  * |SCTP_TIMER_TYPE_SEND         |Yes |Yes |Yes |
2137  * |SCTP_TIMER_TYPE_INIT         |Yes |Yes |Yes |
2138  * |SCTP_TIMER_TYPE_RECV         |Yes |Yes |No  |
2139  * |SCTP_TIMER_TYPE_SHUTDOWN     |Yes |Yes |Yes |
2140  * |SCTP_TIMER_TYPE_HEARTBEAT    |Yes |Yes |Yes |
2141  * |SCTP_TIMER_TYPE_COOKIE       |Yes |Yes |Yes |
2142  * |SCTP_TIMER_TYPE_NEWCOOKIE    |Yes |No  |No  |
2143  * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes |
2144  * |SCTP_TIMER_TYPE_SHUTDOWNACK  |Yes |Yes |Yes |
2145  * |SCTP_TIMER_TYPE_ASCONF       |Yes |Yes |Yes |
2146  * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No  |
2147  * |SCTP_TIMER_TYPE_AUTOCLOSE    |Yes |Yes |No  |
2148  * |SCTP_TIMER_TYPE_STRRESET     |Yes |Yes |Yes |
2149  * |SCTP_TIMER_TYPE_INPKILL      |Yes |No  |No  |
2150  * |SCTP_TIMER_TYPE_ASOCKILL     |Yes |Yes |No  |
2151  * |SCTP_TIMER_TYPE_ADDR_WQ      |No  |No  |No  |
2152  * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No  |
2153  *
2154  */
2155 
2156 void
2157 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2158     struct sctp_nets *net)
2159 {
2160 	struct sctp_timer *tmr;
2161 	uint32_t to_ticks;
2162 	uint32_t rndval, jitter;
2163 
2164 	KASSERT(stcb == NULL || stcb->sctp_ep == inp,
2165 	    ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p",
2166 	    t_type, stcb, stcb->sctp_ep));
2167 	tmr = NULL;
2168 	to_ticks = 0;
2169 	if (stcb != NULL) {
2170 		SCTP_TCB_LOCK_ASSERT(stcb);
2171 	} else if (inp != NULL) {
2172 		SCTP_INP_WLOCK_ASSERT(inp);
2173 	} else {
2174 		SCTP_WQ_ADDR_LOCK_ASSERT();
2175 	}
2176 	if (stcb != NULL) {
2177 		/*
2178 		 * Don't restart timer on association that's about to be
2179 		 * killed.
2180 		 */
2181 		if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) &&
2182 		    (t_type != SCTP_TIMER_TYPE_ASOCKILL)) {
2183 			SCTPDBG(SCTP_DEBUG_TIMER2,
2184 			    "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n",
2185 			    t_type, inp, stcb, net);
2186 			return;
2187 		}
2188 		/* Don't restart timer on net that's been removed. */
2189 		if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) {
2190 			SCTPDBG(SCTP_DEBUG_TIMER2,
2191 			    "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n",
2192 			    t_type, inp, stcb, net);
2193 			return;
2194 		}
2195 	}
2196 	switch (t_type) {
2197 	case SCTP_TIMER_TYPE_SEND:
2198 		/* Here we use the RTO timer. */
2199 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2200 #ifdef INVARIANTS
2201 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2202 			    t_type, inp, stcb, net);
2203 #else
2204 			return;
2205 #endif
2206 		}
2207 		tmr = &net->rxt_timer;
2208 		if (net->RTO == 0) {
2209 			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2210 		} else {
2211 			to_ticks = sctp_msecs_to_ticks(net->RTO);
2212 		}
2213 		break;
2214 	case SCTP_TIMER_TYPE_INIT:
2215 		/*
2216 		 * Here we use the INIT timer default, usually about 1
2217 		 * second.
2218 		 */
2219 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2220 #ifdef INVARIANTS
2221 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2222 			    t_type, inp, stcb, net);
2223 #else
2224 			return;
2225 #endif
2226 		}
2227 		tmr = &net->rxt_timer;
2228 		if (net->RTO == 0) {
2229 			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2230 		} else {
2231 			to_ticks = sctp_msecs_to_ticks(net->RTO);
2232 		}
2233 		break;
2234 	case SCTP_TIMER_TYPE_RECV:
2235 		/*
2236 		 * Here we use the Delayed-Ack timer value from the inp,
2237 		 * usually about 200ms.
2238 		 */
2239 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2240 #ifdef INVARIANTS
2241 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2242 			    t_type, inp, stcb, net);
2243 #else
2244 			return;
2245 #endif
2246 		}
2247 		tmr = &stcb->asoc.dack_timer;
2248 		to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack);
2249 		break;
2250 	case SCTP_TIMER_TYPE_SHUTDOWN:
2251 		/* Here we use the RTO of the destination. */
2252 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2253 #ifdef INVARIANTS
2254 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2255 			    t_type, inp, stcb, net);
2256 #else
2257 			return;
2258 #endif
2259 		}
2260 		tmr = &net->rxt_timer;
2261 		if (net->RTO == 0) {
2262 			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2263 		} else {
2264 			to_ticks = sctp_msecs_to_ticks(net->RTO);
2265 		}
2266 		break;
2267 	case SCTP_TIMER_TYPE_HEARTBEAT:
2268 		/*
2269 		 * The net is used here so that we can add in the RTO, even
2270 		 * though we use a different timer. We also add the HB delay
2271 		 * plus a random jitter.
2272 		 */
2273 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2274 #ifdef INVARIANTS
2275 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2276 			    t_type, inp, stcb, net);
2277 #else
2278 			return;
2279 #endif
2280 		}
2281 		if ((net->dest_state & SCTP_ADDR_NOHB) &&
2282 		    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2283 			SCTPDBG(SCTP_DEBUG_TIMER2,
2284 			    "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n",
2285 			    t_type, inp, stcb, net);
2286 			return;
2287 		}
2288 		tmr = &net->hb_timer;
2289 		if (net->RTO == 0) {
2290 			to_ticks = stcb->asoc.initial_rto;
2291 		} else {
2292 			to_ticks = net->RTO;
2293 		}
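		/*
		 * Apply a random jitter so the resulting delay lies roughly
		 * in [to_ticks / 2, 3 * to_ticks / 2), preventing heartbeats
		 * to different destinations from synchronizing.
		 */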
2294 		rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2295 		jitter = rndval % to_ticks;
2296 		if (jitter >= (to_ticks >> 1)) {
2297 			to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2298 		} else {
2299 			to_ticks = to_ticks - jitter;
2300 		}
2301 		if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2302 		    !(net->dest_state & SCTP_ADDR_PF)) {
2303 			to_ticks += net->heart_beat_delay;
2304 		}
2305 		/*
2306 		 * Now convert to_ticks, which at this point holds
2307 		 * milliseconds, into ticks.
2308 		 */
2309 		to_ticks = sctp_msecs_to_ticks(to_ticks);
2310 		break;
2311 	case SCTP_TIMER_TYPE_COOKIE:
2312 		/*
2313 		 * Here we can use the RTO timer from the network since one
2314 		 * RTT has completed. If a retransmission happened, then we
2315 		 * will be using the initial RTO value.
2316 		 */
2317 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2318 #ifdef INVARIANTS
2319 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2320 			    t_type, inp, stcb, net);
2321 #else
2322 			return;
2323 #endif
2324 		}
2325 		tmr = &net->rxt_timer;
2326 		if (net->RTO == 0) {
2327 			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2328 		} else {
2329 			to_ticks = sctp_msecs_to_ticks(net->RTO);
2330 		}
2331 		break;
2332 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2333 		/*
2334 		 * Nothing needed but the endpoint here, usually about 60
2335 		 * minutes.
2336 		 */
2337 		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
2338 #ifdef INVARIANTS
2339 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2340 			    t_type, inp, stcb, net);
2341 #else
2342 			return;
2343 #endif
2344 		}
2345 		tmr = &inp->sctp_ep.signature_change;
2346 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2347 		break;
2348 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2349 		/*
2350 		 * Here we use the value found in the EP for PMTUD,
2351 		 * usually about 10 minutes.
2352 		 */
2353 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2354 #ifdef INVARIANTS
2355 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2356 			    t_type, inp, stcb, net);
2357 #else
2358 			return;
2359 #endif
2360 		}
2361 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2362 			SCTPDBG(SCTP_DEBUG_TIMER2,
2363 			    "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n",
2364 			    t_type, inp, stcb, net);
2365 			return;
2366 		}
2367 		tmr = &net->pmtu_timer;
2368 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2369 		break;
2370 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2371 		/* Here we use the RTO of the destination. */
2372 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2373 #ifdef INVARIANTS
2374 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2375 			    t_type, inp, stcb, net);
2376 #else
2377 			return;
2378 #endif
2379 		}
2380 		tmr = &net->rxt_timer;
2381 		if (net->RTO == 0) {
2382 			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2383 		} else {
2384 			to_ticks = sctp_msecs_to_ticks(net->RTO);
2385 		}
2386 		break;
2387 	case SCTP_TIMER_TYPE_ASCONF:
2388 		/*
2389 		 * Here the timer comes from the stcb but its value is from
2390 		 * the net's RTO.
2391 		 */
2392 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2393 #ifdef INVARIANTS
2394 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2395 			    t_type, inp, stcb, net);
2396 #else
2397 			return;
2398 #endif
2399 		}
2400 		tmr = &stcb->asoc.asconf_timer;
2401 		if (net->RTO == 0) {
2402 			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2403 		} else {
2404 			to_ticks = sctp_msecs_to_ticks(net->RTO);
2405 		}
2406 		break;
2407 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2408 		/*
2409 		 * Here we use the endpoint's shutdown guard timer, usually
2410 		 * about 3 minutes.
2411 		 */
2412 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2413 #ifdef INVARIANTS
2414 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2415 			    t_type, inp, stcb, net);
2416 #else
2417 			return;
2418 #endif
2419 		}
2420 		tmr = &stcb->asoc.shut_guard_timer;
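		/*
		 * If no explicit shutdown guard value is configured, default
		 * to five times the maximum RTO, taking care not to overflow
		 * the 32-bit millisecond value.
		 */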
2421 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2422 			if (stcb->asoc.maxrto < UINT32_MAX / 5) {
2423 				to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto);
2424 			} else {
2425 				to_ticks = sctp_msecs_to_ticks(UINT32_MAX);
2426 			}
2427 		} else {
2428 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2429 		}
2430 		break;
2431 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2432 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2433 #ifdef INVARIANTS
2434 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2435 			    t_type, inp, stcb, net);
2436 #else
2437 			return;
2438 #endif
2439 		}
2440 		tmr = &stcb->asoc.autoclose_timer;
2441 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2442 		break;
2443 	case SCTP_TIMER_TYPE_STRRESET:
2444 		/*
2445 		 * Here the timer comes from the stcb but its value is from
2446 		 * the net's RTO.
2447 		 */
2448 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2449 #ifdef INVARIANTS
2450 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2451 			    t_type, inp, stcb, net);
2452 #else
2453 			return;
2454 #endif
2455 		}
2456 		tmr = &stcb->asoc.strreset_timer;
2457 		if (net->RTO == 0) {
2458 			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2459 		} else {
2460 			to_ticks = sctp_msecs_to_ticks(net->RTO);
2461 		}
2462 		break;
2463 	case SCTP_TIMER_TYPE_INPKILL:
2464 		/*
2465 		 * The inp is set up to die. We re-use the signature_change
2466 		 * timer since that has stopped and we are in the GONE
2467 		 * state.
2468 		 */
2469 		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
2470 #ifdef INVARIANTS
2471 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2472 			    t_type, inp, stcb, net);
2473 #else
2474 			return;
2475 #endif
2476 		}
2477 		tmr = &inp->sctp_ep.signature_change;
2478 		to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT);
2479 		break;
2480 	case SCTP_TIMER_TYPE_ASOCKILL:
2481 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2482 #ifdef INVARIANTS
2483 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2484 			    t_type, inp, stcb, net);
2485 #else
2486 			return;
2487 #endif
2488 		}
2489 		tmr = &stcb->asoc.strreset_timer;
2490 		to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT);
2491 		break;
2492 	case SCTP_TIMER_TYPE_ADDR_WQ:
2493 		if ((inp != NULL) || (stcb != NULL) || (net != NULL)) {
2494 #ifdef INVARIANTS
2495 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2496 			    t_type, inp, stcb, net);
2497 #else
2498 			return;
2499 #endif
2500 		}
2501 		/* Only 1 tick away :-) */
2502 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2503 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
2504 		break;
2505 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2506 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2507 #ifdef INVARIANTS
2508 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2509 			    t_type, inp, stcb, net);
2510 #else
2511 			return;
2512 #endif
2513 		}
2514 		tmr = &stcb->asoc.delete_prim_timer;
2515 		to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2516 		break;
2517 	default:
2518 #ifdef INVARIANTS
2519 		panic("Unknown timer type %d", t_type);
2520 #else
2521 		return;
2522 #endif
2523 	}
2524 	KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type));
2525 	KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type));
2526 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2527 		/*
2528 		 * We do NOT allow you to have it already running. If it is,
2529 		 * we leave the current one up unchanged.
2530 		 */
2531 		SCTPDBG(SCTP_DEBUG_TIMER2,
2532 		    "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n",
2533 		    t_type, inp, stcb, net);
2534 		return;
2535 	}
2536 	/* At this point we can proceed. */
2537 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2538 		stcb->asoc.num_send_timers_up++;
2539 	}
2540 	tmr->stopped_from = 0;
2541 	tmr->type = t_type;
2542 	tmr->ep = (void *)inp;
2543 	tmr->tcb = (void *)stcb;
2544 	if (t_type == SCTP_TIMER_TYPE_STRRESET) {
2545 		tmr->net = NULL;
2546 	} else {
2547 		tmr->net = (void *)net;
2548 	}
2549 	tmr->self = (void *)tmr;
2550 	tmr->vnet = (void *)curvnet;
2551 	tmr->ticks = sctp_get_tick_count();
2552 	if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) {
2553 		SCTPDBG(SCTP_DEBUG_TIMER2,
2554 		    "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n",
2555 		    t_type, to_ticks, inp, stcb, net);
2556 	} else {
2557 		/*
2558 		 * This should not happen, since we checked for pending
2559 		 * above.
2560 		 */
2561 		SCTPDBG(SCTP_DEBUG_TIMER2,
2562 		    "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n",
2563 		    t_type, to_ticks, inp, stcb, net);
2564 	}
2565 	return;
2566 }
2567 
2568 /*-
2569  * The following table shows which parameters must be provided
2570  * when calling sctp_timer_stop(). For parameters not being
2571  * provided, NULL must be used.
2572  *
2573  * |Name                         |inp |stcb|net |
2574  * |-----------------------------|----|----|----|
2575  * |SCTP_TIMER_TYPE_SEND         |Yes |Yes |Yes |
2576  * |SCTP_TIMER_TYPE_INIT         |Yes |Yes |Yes |
2577  * |SCTP_TIMER_TYPE_RECV         |Yes |Yes |No  |
2578  * |SCTP_TIMER_TYPE_SHUTDOWN     |Yes |Yes |Yes |
2579  * |SCTP_TIMER_TYPE_HEARTBEAT    |Yes |Yes |Yes |
2580  * |SCTP_TIMER_TYPE_COOKIE       |Yes |Yes |Yes |
2581  * |SCTP_TIMER_TYPE_NEWCOOKIE    |Yes |No  |No  |
2582  * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes |
2583  * |SCTP_TIMER_TYPE_SHUTDOWNACK  |Yes |Yes |Yes |
2584  * |SCTP_TIMER_TYPE_ASCONF       |Yes |Yes |No  |
2585  * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No  |
2586  * |SCTP_TIMER_TYPE_AUTOCLOSE    |Yes |Yes |No  |
2587  * |SCTP_TIMER_TYPE_STRRESET     |Yes |Yes |No  |
2588  * |SCTP_TIMER_TYPE_INPKILL      |Yes |No  |No  |
2589  * |SCTP_TIMER_TYPE_ASOCKILL     |Yes |Yes |No  |
2590  * |SCTP_TIMER_TYPE_ADDR_WQ      |No  |No  |No  |
2591  * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No  |
2592  *
2593  */
2594 
2595 void
2596 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2597     struct sctp_nets *net, uint32_t from)
2598 {
2599 	struct sctp_timer *tmr;
2600 
2601 	KASSERT(stcb == NULL || stcb->sctp_ep == inp,
2602 	    ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p",
2603 	    t_type, stcb, stcb->sctp_ep));
2604 	if (stcb != NULL) {
2605 		SCTP_TCB_LOCK_ASSERT(stcb);
2606 	} else if (inp != NULL) {
2607 		SCTP_INP_WLOCK_ASSERT(inp);
2608 	} else {
2609 		SCTP_WQ_ADDR_LOCK_ASSERT();
2610 	}
2611 	tmr = NULL;
2612 	switch (t_type) {
2613 	case SCTP_TIMER_TYPE_SEND:
2614 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2615 #ifdef INVARIANTS
2616 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2617 			    t_type, inp, stcb, net);
2618 #else
2619 			return;
2620 #endif
2621 		}
2622 		tmr = &net->rxt_timer;
2623 		break;
2624 	case SCTP_TIMER_TYPE_INIT:
2625 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2626 #ifdef INVARIANTS
2627 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2628 			    t_type, inp, stcb, net);
2629 #else
2630 			return;
2631 #endif
2632 		}
2633 		tmr = &net->rxt_timer;
2634 		break;
2635 	case SCTP_TIMER_TYPE_RECV:
2636 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2637 #ifdef INVARIANTS
2638 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2639 			    t_type, inp, stcb, net);
2640 #else
2641 			return;
2642 #endif
2643 		}
2644 		tmr = &stcb->asoc.dack_timer;
2645 		break;
2646 	case SCTP_TIMER_TYPE_SHUTDOWN:
2647 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2648 #ifdef INVARIANTS
2649 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2650 			    t_type, inp, stcb, net);
2651 #else
2652 			return;
2653 #endif
2654 		}
2655 		tmr = &net->rxt_timer;
2656 		break;
2657 	case SCTP_TIMER_TYPE_HEARTBEAT:
2658 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2659 #ifdef INVARIANTS
2660 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2661 			    t_type, inp, stcb, net);
2662 #else
2663 			return;
2664 #endif
2665 		}
2666 		tmr = &net->hb_timer;
2667 		break;
2668 	case SCTP_TIMER_TYPE_COOKIE:
2669 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2670 #ifdef INVARIANTS
2671 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2672 			    t_type, inp, stcb, net);
2673 #else
2674 			return;
2675 #endif
2676 		}
2677 		tmr = &net->rxt_timer;
2678 		break;
2679 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2680 		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
2681 #ifdef INVARIANTS
2682 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2683 			    t_type, inp, stcb, net);
2684 #else
2685 			return;
2686 #endif
2687 		}
2688 		tmr = &inp->sctp_ep.signature_change;
2689 		break;
2690 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2691 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2692 #ifdef INVARIANTS
2693 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2694 			    t_type, inp, stcb, net);
2695 #else
2696 			return;
2697 #endif
2698 		}
2699 		tmr = &net->pmtu_timer;
2700 		break;
2701 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2702 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2703 #ifdef INVARIANTS
2704 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2705 			    t_type, inp, stcb, net);
2706 #else
2707 			return;
2708 #endif
2709 		}
2710 		tmr = &net->rxt_timer;
2711 		break;
2712 	case SCTP_TIMER_TYPE_ASCONF:
2713 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2714 #ifdef INVARIANTS
2715 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2716 			    t_type, inp, stcb, net);
2717 #else
2718 			return;
2719 #endif
2720 		}
2721 		tmr = &stcb->asoc.asconf_timer;
2722 		break;
2723 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2724 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2725 #ifdef INVARIANTS
2726 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2727 			    t_type, inp, stcb, net);
2728 #else
2729 			return;
2730 #endif
2731 		}
2732 		tmr = &stcb->asoc.shut_guard_timer;
2733 		break;
2734 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2735 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2736 #ifdef INVARIANTS
2737 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2738 			    t_type, inp, stcb, net);
2739 #else
2740 			return;
2741 #endif
2742 		}
2743 		tmr = &stcb->asoc.autoclose_timer;
2744 		break;
2745 	case SCTP_TIMER_TYPE_STRRESET:
2746 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2747 #ifdef INVARIANTS
2748 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2749 			    t_type, inp, stcb, net);
2750 #else
2751 			return;
2752 #endif
2753 		}
2754 		tmr = &stcb->asoc.strreset_timer;
2755 		break;
2756 	case SCTP_TIMER_TYPE_INPKILL:
2757 		/*
2758 		 * The inp is set up to die. We re-use the signature_change
2759 		 * timer since that has stopped and we are in the GONE
2760 		 * state.
2761 		 */
2762 		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
2763 #ifdef INVARIANTS
2764 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2765 			    t_type, inp, stcb, net);
2766 #else
2767 			return;
2768 #endif
2769 		}
2770 		tmr = &inp->sctp_ep.signature_change;
2771 		break;
2772 	case SCTP_TIMER_TYPE_ASOCKILL:
2773 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2774 #ifdef INVARIANTS
2775 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2776 			    t_type, inp, stcb, net);
2777 #else
2778 			return;
2779 #endif
2780 		}
2781 		tmr = &stcb->asoc.strreset_timer;
2782 		break;
2783 	case SCTP_TIMER_TYPE_ADDR_WQ:
2784 		if ((inp != NULL) || (stcb != NULL) || (net != NULL)) {
2785 #ifdef INVARIANTS
2786 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2787 			    t_type, inp, stcb, net);
2788 #else
2789 			return;
2790 #endif
2791 		}
2792 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2793 		break;
2794 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2795 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2796 #ifdef INVARIANTS
2797 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2798 			    t_type, inp, stcb, net);
2799 #else
2800 			return;
2801 #endif
2802 		}
2803 		tmr = &stcb->asoc.delete_prim_timer;
2804 		break;
2805 	default:
2806 #ifdef INVARIANTS
2807 		panic("Unknown timer type %d", t_type);
2808 #else
2809 		return;
2810 #endif
2811 	}
2812 	KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type));
2813 	if ((tmr->type != SCTP_TIMER_TYPE_NONE) &&
2814 	    (tmr->type != t_type)) {
2815 		/*
2816 		 * OK, we have a timer that is under joint use, for example the
2817 		 * cookie timer sharing its slot with the SEND timer. We are
2818 		 * therefore NOT running the timer that the caller wants
2819 		 * stopped, so just return.
2820 		 */
2821 		SCTPDBG(SCTP_DEBUG_TIMER2,
2822 		    "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n",
2823 		    t_type, inp, stcb, net);
2824 		return;
2825 	}
2826 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2827 		stcb->asoc.num_send_timers_up--;
2828 		if (stcb->asoc.num_send_timers_up < 0) {
2829 			stcb->asoc.num_send_timers_up = 0;
2830 		}
2831 	}
2832 	tmr->self = NULL;
2833 	tmr->stopped_from = from;
2834 	if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) {
2835 		KASSERT(tmr->ep == inp,
2836 		    ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p",
2837 		    t_type, inp, tmr->ep));
2838 		KASSERT(tmr->tcb == stcb,
2839 		    ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p",
2840 		    t_type, stcb, tmr->tcb));
2841 		KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) ||
2842 		    ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)),
2843 		    ("sctp_timer_stop of type %d: net = %p, tmr->net = %p",
2844 		    t_type, net, tmr->net));
2845 		SCTPDBG(SCTP_DEBUG_TIMER2,
2846 		    "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n",
2847 		    t_type, inp, stcb, net);
2848 		tmr->ep = NULL;
2849 		tmr->tcb = NULL;
2850 		tmr->net = NULL;
2851 	} else {
2852 		SCTPDBG(SCTP_DEBUG_TIMER2,
2853 		    "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n",
2854 		    t_type, inp, stcb, net);
2855 	}
2856 	return;
2857 }
2858 
2859 uint32_t
2860 sctp_calculate_len(struct mbuf *m)
2861 {
2862 	uint32_t tlen = 0;
2863 	struct mbuf *at;
2864 
2865 	at = m;
2866 	while (at) {
2867 		tlen += SCTP_BUF_LEN(at);
2868 		at = SCTP_BUF_NEXT(at);
2869 	}
2870 	return (tlen);
2871 }
2872 
2873 void
2874 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2875     struct sctp_association *asoc, uint32_t mtu)
2876 {
2877 	/*
2878 	 * Reset the P-MTU size on this association. This involves changing
2879 	 * the asoc MTU and going through ANY chunk+overhead larger than mtu
2880 	 * to allow the DF flag to be cleared.
2881 	 */
2882 	struct sctp_tmit_chunk *chk;
2883 	unsigned int eff_mtu, ovh;
2884 
2885 	asoc->smallest_mtu = mtu;
2886 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2887 		ovh = SCTP_MIN_OVERHEAD;
2888 	} else {
2889 		ovh = SCTP_MIN_V4_OVERHEAD;
2890 	}
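	/*
	 * The effective payload limit is the new MTU minus the per-packet
	 * overhead; any queued chunk larger than that must be allowed to be
	 * fragmented.
	 */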
2891 	eff_mtu = mtu - ovh;
2892 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2893 		if (chk->send_size > eff_mtu) {
2894 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2895 		}
2896 	}
2897 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2898 		if (chk->send_size > eff_mtu) {
2899 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2900 		}
2901 	}
2902 }
2903 
2904 
2905 /*
2906  * Given an association and the starting time of the current RTT period,
2907  * update RTO in number of msecs. net should point to the current network.
2908  * Return 1 if an RTO update was performed, or 0 if no update was performed
2909  * due to an invalid starting point.
2910  */
2911 
2912 int
2913 sctp_calculate_rto(struct sctp_tcb *stcb,
2914     struct sctp_association *asoc,
2915     struct sctp_nets *net,
2916     struct timeval *old,
2917     int rtt_from_sack)
2918 {
2919 	struct timeval now;
2920 	uint64_t rtt_us;	/* RTT in us */
2921 	int32_t rtt;		/* RTT in ms */
2922 	uint32_t new_rto;
2923 	int first_measure = 0;
2924 
2925 	/************************/
2926 	/* 1. calculate new RTT */
2927 	/************************/
2928 	/* get the current time */
2929 	if (stcb->asoc.use_precise_time) {
2930 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2931 	} else {
2932 		(void)SCTP_GETTIME_TIMEVAL(&now);
2933 	}
2934 	if ((old->tv_sec > now.tv_sec) ||
2935 	    ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) {
2936 		/* The starting point is in the future. */
2937 		return (0);
2938 	}
2939 	timevalsub(&now, old);
2940 	rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec;
2941 	if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) {
2942 		/* The RTT is larger than a sane value. */
2943 		return (0);
2944 	}
2945 	/* store the current RTT in us */
2946 	net->rtt = rtt_us;
2947 	/* compute rtt in ms */
2948 	rtt = (int32_t)(net->rtt / 1000);
2949 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2950 		/*
2951 		 * Tell the CC module that a new update has just occurred
2952 		 * from a sack
2953 		 */
2954 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2955 	}
2956 	/*
2957 	 * Do we need to determine the LAN type? We do this only on SACKs,
2958 	 * i.e. the RTT being determined from data, not non-data (HB/INIT->INITACK).
2959 	 */
2960 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2961 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2962 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2963 			net->lan_type = SCTP_LAN_INTERNET;
2964 		} else {
2965 			net->lan_type = SCTP_LAN_LOCAL;
2966 		}
2967 	}
2968 
2969 	/***************************/
2970 	/* 2. update RTTVAR & SRTT */
2971 	/***************************/
2972 	/*-
2973 	 * Compute the scaled average lastsa and the
2974 	 * scaled variance lastsv as described in Van Jacobson's
2975 	 * paper "Congestion Avoidance and Control", Annex A.
2976 	 *
2977 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2978 	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
2979 	 */
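	/*
	 * In unscaled terms this is the usual recurrence
	 *   srtt   += (rtt - srtt) / 2^SCTP_RTT_SHIFT
	 *   rttvar += (|rtt - srtt| - rttvar) / 2^SCTP_RTT_VAR_SHIFT
	 * kept in fixed point by storing the scaled values in lastsa and
	 * lastsv.
	 */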
2980 	if (net->RTO_measured) {
2981 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2982 		net->lastsa += rtt;
2983 		if (rtt < 0) {
2984 			rtt = -rtt;
2985 		}
2986 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2987 		net->lastsv += rtt;
2988 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2989 			rto_logging(net, SCTP_LOG_RTTVAR);
2990 		}
2991 	} else {
2992 		/* First RTO measurement */
2993 		net->RTO_measured = 1;
2994 		first_measure = 1;
2995 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2996 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2997 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2998 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2999 		}
3000 	}
3001 	if (net->lastsv == 0) {
3002 		net->lastsv = SCTP_CLOCK_GRANULARITY;
3003 	}
3004 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
3005 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
3006 	    (stcb->asoc.sat_network_lockout == 0)) {
3007 		stcb->asoc.sat_network = 1;
3008 	} else if ((!first_measure) && stcb->asoc.sat_network) {
3009 		stcb->asoc.sat_network = 0;
3010 		stcb->asoc.sat_network_lockout = 1;
3011 	}
3012 	/* bound it, per C6/C7 in Section 5.3.1 */
3013 	if (new_rto < stcb->asoc.minrto) {
3014 		new_rto = stcb->asoc.minrto;
3015 	}
3016 	if (new_rto > stcb->asoc.maxrto) {
3017 		new_rto = stcb->asoc.maxrto;
3018 	}
3019 	net->RTO = new_rto;
3020 	return (1);
3021 }
3022 
3023 /*
3024  * Return a pointer to a contiguous piece of data from the given mbuf chain
3025  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
3026  * one mbuf, a copy is made at 'in_ptr'. The caller must ensure that the
3027  * buffer size is >= 'len'. Returns NULL if there aren't 'len' bytes in the chain.
3028  */
3029 caddr_t
3030 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
3031 {
3032 	uint32_t count;
3033 	uint8_t *ptr;
3034 
3035 	ptr = in_ptr;
3036 	if ((off < 0) || (len <= 0))
3037 		return (NULL);
3038 
3039 	/* find the desired start location */
3040 	while ((m != NULL) && (off > 0)) {
3041 		if (off < SCTP_BUF_LEN(m))
3042 			break;
3043 		off -= SCTP_BUF_LEN(m);
3044 		m = SCTP_BUF_NEXT(m);
3045 	}
3046 	if (m == NULL)
3047 		return (NULL);
3048 
3049 	/* is the current mbuf large enough (e.g. contiguous)? */
3050 	if ((SCTP_BUF_LEN(m) - off) >= len) {
3051 		return (mtod(m, caddr_t)+off);
3052 	} else {
3053 		/* else, it spans more than one mbuf, so save a temp copy... */
3054 		while ((m != NULL) && (len > 0)) {
3055 			count = min(SCTP_BUF_LEN(m) - off, len);
3056 			memcpy(ptr, mtod(m, caddr_t)+off, count);
3057 			len -= count;
3058 			ptr += count;
3059 			off = 0;
3060 			m = SCTP_BUF_NEXT(m);
3061 		}
3062 		if ((m == NULL) && (len > 0))
3063 			return (NULL);
3064 		else
3065 			return ((caddr_t)in_ptr);
3066 	}
3067 }
3068 
3069 
3070 
3071 struct sctp_paramhdr *
3072 sctp_get_next_param(struct mbuf *m,
3073     int offset,
3074     struct sctp_paramhdr *pull,
3075     int pull_limit)
3076 {
3077 	/* This just provides a typed signature to Peter's Pull routine */
3078 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
3079 	    (uint8_t *)pull));
3080 }
3081 
3082 
3083 struct mbuf *
3084 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
3085 {
3086 	struct mbuf *m_last;
3087 	caddr_t dp;
3088 
3089 	if (padlen > 3) {
3090 		return (NULL);
3091 	}
3092 	if (padlen <= M_TRAILINGSPACE(m)) {
3093 		/*
3094 		 * The easy way. We hope the majority of the time we hit
3095 		 * here :)
3096 		 */
3097 		m_last = m;
3098 	} else {
3099 		/* Hard way we must grow the mbuf chain */
3100 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
3101 		if (m_last == NULL) {
3102 			return (NULL);
3103 		}
3104 		SCTP_BUF_LEN(m_last) = 0;
3105 		SCTP_BUF_NEXT(m_last) = NULL;
3106 		SCTP_BUF_NEXT(m) = m_last;
3107 	}
3108 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
3109 	SCTP_BUF_LEN(m_last) += padlen;
3110 	memset(dp, 0, padlen);
3111 	return (m_last);
3112 }
3113 
3114 struct mbuf *
3115 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
3116 {
3117 	/* find the last mbuf in chain and pad it */
3118 	struct mbuf *m_at;
3119 
3120 	if (last_mbuf != NULL) {
3121 		return (sctp_add_pad_tombuf(last_mbuf, padval));
3122 	} else {
3123 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3124 			if (SCTP_BUF_NEXT(m_at) == NULL) {
3125 				return (sctp_add_pad_tombuf(m_at, padval));
3126 			}
3127 		}
3128 	}
3129 	return (NULL);
3130 }
3131 
3132 static void
3133 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
3134     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
3135 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3136     SCTP_UNUSED
3137 #endif
3138 )
3139 {
3140 	struct mbuf *m_notify;
3141 	struct sctp_assoc_change *sac;
3142 	struct sctp_queued_to_read *control;
3143 	unsigned int notif_len;
3144 	uint16_t abort_len;
3145 	unsigned int i;
3146 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3147 	struct socket *so;
3148 #endif
3149 
3150 	if (stcb == NULL) {
3151 		return;
3152 	}
3153 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
3154 		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
3155 		if (abort != NULL) {
3156 			abort_len = ntohs(abort->ch.chunk_length);
3157 			/*
3158 			 * Only SCTP_CHUNK_BUFFER_SIZE bytes are guaranteed to
3159 			 * be contiguous.
3160 			 */
3161 			if (abort_len > SCTP_CHUNK_BUFFER_SIZE) {
3162 				abort_len = SCTP_CHUNK_BUFFER_SIZE;
3163 			}
3164 		} else {
3165 			abort_len = 0;
3166 		}
3167 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
3168 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
3169 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
3170 			notif_len += abort_len;
3171 		}
3172 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3173 		if (m_notify == NULL) {
3174 			/* Retry with smaller value. */
3175 			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
3176 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3177 			if (m_notify == NULL) {
3178 				goto set_error;
3179 			}
3180 		}
3181 		SCTP_BUF_NEXT(m_notify) = NULL;
3182 		sac = mtod(m_notify, struct sctp_assoc_change *);
3183 		memset(sac, 0, notif_len);
3184 		sac->sac_type = SCTP_ASSOC_CHANGE;
3185 		sac->sac_flags = 0;
3186 		sac->sac_length = sizeof(struct sctp_assoc_change);
3187 		sac->sac_state = state;
3188 		sac->sac_error = error;
3189 		/* XXX verify these stream counts */
3190 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
3191 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
3192 		sac->sac_assoc_id = sctp_get_associd(stcb);
3193 		if (notif_len > sizeof(struct sctp_assoc_change)) {
3194 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
3195 				i = 0;
3196 				if (stcb->asoc.prsctp_supported == 1) {
3197 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
3198 				}
3199 				if (stcb->asoc.auth_supported == 1) {
3200 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
3201 				}
3202 				if (stcb->asoc.asconf_supported == 1) {
3203 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
3204 				}
3205 				if (stcb->asoc.idata_supported == 1) {
3206 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
3207 				}
3208 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
3209 				if (stcb->asoc.reconfig_supported == 1) {
3210 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
3211 				}
3212 				sac->sac_length += i;
3213 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
3214 				memcpy(sac->sac_info, abort, abort_len);
3215 				sac->sac_length += abort_len;
3216 			}
3217 		}
3218 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
3219 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3220 		    0, 0, stcb->asoc.context, 0, 0, 0,
3221 		    m_notify);
3222 		if (control != NULL) {
3223 			control->length = SCTP_BUF_LEN(m_notify);
3224 			control->spec_flags = M_NOTIFICATION;
3225 			/* not that we need this */
3226 			control->tail_mbuf = m_notify;
3227 			sctp_add_to_readq(stcb->sctp_ep, stcb,
3228 			    control,
3229 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
3230 			    so_locked);
3231 		} else {
3232 			sctp_m_freem(m_notify);
3233 		}
3234 	}
3235 	/*
3236 	 * For 1-to-1 style sockets, we send up an error when an ABORT
3237 	 * comes in.
3238 	 */
3239 set_error:
3240 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3241 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
3242 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
3243 		SOCK_LOCK(stcb->sctp_socket);
3244 		if (from_peer) {
3245 			if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
3246 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
3247 				stcb->sctp_socket->so_error = ECONNREFUSED;
3248 			} else {
3249 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
3250 				stcb->sctp_socket->so_error = ECONNRESET;
3251 			}
3252 		} else {
3253 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
3254 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
3255 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
3256 				stcb->sctp_socket->so_error = ETIMEDOUT;
3257 			} else {
3258 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
3259 				stcb->sctp_socket->so_error = ECONNABORTED;
3260 			}
3261 		}
3262 		SOCK_UNLOCK(stcb->sctp_socket);
3263 	}
3264 	/* Wake ANY sleepers */
3265 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3266 	so = SCTP_INP_SO(stcb->sctp_ep);
3267 	if (!so_locked) {
3268 		atomic_add_int(&stcb->asoc.refcnt, 1);
3269 		SCTP_TCB_UNLOCK(stcb);
3270 		SCTP_SOCKET_LOCK(so, 1);
3271 		SCTP_TCB_LOCK(stcb);
3272 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3273 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3274 			SCTP_SOCKET_UNLOCK(so, 1);
3275 			return;
3276 		}
3277 	}
3278 #endif
3279 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3280 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
3281 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
3282 		socantrcvmore(stcb->sctp_socket);
3283 	}
3284 	sorwakeup(stcb->sctp_socket);
3285 	sowwakeup(stcb->sctp_socket);
3286 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3287 	if (!so_locked) {
3288 		SCTP_SOCKET_UNLOCK(so, 1);
3289 	}
3290 #endif
3291 }
3292 
3293 static void
3294 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
3295     struct sockaddr *sa, uint32_t error, int so_locked
3296 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3297     SCTP_UNUSED
3298 #endif
3299 )
3300 {
3301 	struct mbuf *m_notify;
3302 	struct sctp_paddr_change *spc;
3303 	struct sctp_queued_to_read *control;
3304 
3305 	if ((stcb == NULL) ||
3306 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
3307 		/* event not enabled */
3308 		return;
3309 	}
3310 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
3311 	if (m_notify == NULL)
3312 		return;
3313 	SCTP_BUF_LEN(m_notify) = 0;
3314 	spc = mtod(m_notify, struct sctp_paddr_change *);
3315 	memset(spc, 0, sizeof(struct sctp_paddr_change));
3316 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
3317 	spc->spc_flags = 0;
3318 	spc->spc_length = sizeof(struct sctp_paddr_change);
3319 	switch (sa->sa_family) {
3320 #ifdef INET
3321 	case AF_INET:
3322 #ifdef INET6
3323 		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
3324 			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
3325 			    (struct sockaddr_in6 *)&spc->spc_aaddr);
3326 		} else {
3327 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
3328 		}
3329 #else
3330 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
3331 #endif
3332 		break;
3333 #endif
3334 #ifdef INET6
3335 	case AF_INET6:
3336 		{
3337 			struct sockaddr_in6 *sin6;
3338 
3339 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
3340 
3341 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
3342 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
3343 				if (sin6->sin6_scope_id == 0) {
3344 					/* recover scope_id for user */
3345 					(void)sa6_recoverscope(sin6);
3346 				} else {
3347 					/* clear embedded scope_id for user */
3348 					in6_clearscope(&sin6->sin6_addr);
3349 				}
3350 			}
3351 			break;
3352 		}
3353 #endif
3354 	default:
3355 		/* TSNH */
3356 		break;
3357 	}
3358 	spc->spc_state = state;
3359 	spc->spc_error = error;
3360 	spc->spc_assoc_id = sctp_get_associd(stcb);
3361 
3362 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
3363 	SCTP_BUF_NEXT(m_notify) = NULL;
3364 
3365 	/* append to socket */
3366 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3367 	    0, 0, stcb->asoc.context, 0, 0, 0,
3368 	    m_notify);
3369 	if (control == NULL) {
3370 		/* no memory */
3371 		sctp_m_freem(m_notify);
3372 		return;
3373 	}
3374 	control->length = SCTP_BUF_LEN(m_notify);
3375 	control->spec_flags = M_NOTIFICATION;
3376 	/* not that we need this */
3377 	control->tail_mbuf = m_notify;
3378 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3379 	    control,
3380 	    &stcb->sctp_socket->so_rcv, 1,
3381 	    SCTP_READ_LOCK_NOT_HELD,
3382 	    so_locked);
3383 }
3384 
3385 
3386 static void
3387 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
3388     struct sctp_tmit_chunk *chk, int so_locked
3389 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3390     SCTP_UNUSED
3391 #endif
3392 )
3393 {
3394 	struct mbuf *m_notify;
3395 	struct sctp_send_failed *ssf;
3396 	struct sctp_send_failed_event *ssfe;
3397 	struct sctp_queued_to_read *control;
3398 	struct sctp_chunkhdr *chkhdr;
3399 	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;
3400 
3401 	if ((stcb == NULL) ||
3402 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3403 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3404 		/* event not enabled */
3405 		return;
3406 	}
3407 
3408 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3409 		notifhdr_len = sizeof(struct sctp_send_failed_event);
3410 	} else {
3411 		notifhdr_len = sizeof(struct sctp_send_failed);
3412 	}
3413 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3414 	if (m_notify == NULL)
3415 		/* no space left */
3416 		return;
3417 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3418 	if (stcb->asoc.idata_supported) {
3419 		chkhdr_len = sizeof(struct sctp_idata_chunk);
3420 	} else {
3421 		chkhdr_len = sizeof(struct sctp_data_chunk);
3422 	}
3423 	/* Use some defaults in case we can't access the chunk header */
3424 	if (chk->send_size >= chkhdr_len) {
3425 		payload_len = chk->send_size - chkhdr_len;
3426 	} else {
3427 		payload_len = 0;
3428 	}
3429 	padding_len = 0;
3430 	if (chk->data != NULL) {
3431 		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
3432 		if (chkhdr != NULL) {
3433 			chk_len = ntohs(chkhdr->chunk_length);
3434 			if ((chk_len >= chkhdr_len) &&
3435 			    (chk->send_size >= chk_len) &&
3436 			    (chk->send_size - chk_len < 4)) {
3437 				padding_len = chk->send_size - chk_len;
3438 				payload_len = chk->send_size - chkhdr_len - padding_len;
3439 			}
3440 		}
3441 	}
3442 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3443 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3444 		memset(ssfe, 0, notifhdr_len);
3445 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3446 		if (sent) {
3447 			ssfe->ssfe_flags = SCTP_DATA_SENT;
3448 		} else {
3449 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3450 		}
3451 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
3452 		ssfe->ssfe_error = error;
3453 		/* not exactly what the user sent in, but should be close :) */
3454 		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
3455 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
3456 		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
3457 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
3458 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3459 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3460 	} else {
3461 		ssf = mtod(m_notify, struct sctp_send_failed *);
3462 		memset(ssf, 0, notifhdr_len);
3463 		ssf->ssf_type = SCTP_SEND_FAILED;
3464 		if (sent) {
3465 			ssf->ssf_flags = SCTP_DATA_SENT;
3466 		} else {
3467 			ssf->ssf_flags = SCTP_DATA_UNSENT;
3468 		}
3469 		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
3470 		ssf->ssf_error = error;
3471 		/* not exactly what the user sent in, but should be close :) */
3472 		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
3473 		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
3474 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
3475 		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
3476 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
3477 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3478 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3479 	}
3480 	if (chk->data != NULL) {
3481 		/* Trim off the sctp chunk header (it should be there) */
3482 		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
3483 			m_adj(chk->data, chkhdr_len);
3484 			m_adj(chk->data, -padding_len);
3485 			sctp_mbuf_crush(chk->data);
3486 			chk->send_size -= (chkhdr_len + padding_len);
3487 		}
3488 	}
3489 	SCTP_BUF_NEXT(m_notify) = chk->data;
3490 	/* Steal off the mbuf */
3491 	chk->data = NULL;
3492 	/*
3493 	 * For this case, we check the actual socket buffer, since the assoc
3494 	 * is going away we don't want to overfill the socket buffer for a
3495 	 * non-reader
3496 	 */
3497 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3498 		sctp_m_freem(m_notify);
3499 		return;
3500 	}
3501 	/* append to socket */
3502 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3503 	    0, 0, stcb->asoc.context, 0, 0, 0,
3504 	    m_notify);
3505 	if (control == NULL) {
3506 		/* no memory */
3507 		sctp_m_freem(m_notify);
3508 		return;
3509 	}
3510 	control->length = SCTP_BUF_LEN(m_notify);
3511 	control->spec_flags = M_NOTIFICATION;
3512 	/* not that we need this */
3513 	control->tail_mbuf = m_notify;
3514 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3515 	    control,
3516 	    &stcb->sctp_socket->so_rcv, 1,
3517 	    SCTP_READ_LOCK_NOT_HELD,
3518 	    so_locked);
3519 }
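
/*
 * Illustrative userland sketch (not part of this file): interpreting the
 * SCTP_SEND_FAILED_EVENT queued above.  The failed user data is chained
 * behind the notification header, so after a recvmsg() that returned n
 * bytes into buf with MSG_NOTIFICATION set (both names are assumptions),
 * the payload immediately follows the fixed-size header.
 *
 *	struct sctp_send_failed_event *ssfe;
 *	uint8_t *payload;
 *	size_t payload_len;
 *
 *	ssfe = (struct sctp_send_failed_event *)buf;
 *	if (ssfe->ssfe_type == SCTP_SEND_FAILED_EVENT &&
 *	    (size_t)n > sizeof(*ssfe)) {
 *		payload = (uint8_t *)(ssfe + 1);
 *		payload_len = (size_t)n - sizeof(*ssfe);
 *		if (ssfe->ssfe_flags == SCTP_DATA_UNSENT)
 *			printf("%zu bytes were never sent\n", payload_len);
 *		else
 *			printf("%zu bytes were sent but not acked\n", payload_len);
 *	}
 */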
3520 
3521 
3522 static void
3523 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3524     struct sctp_stream_queue_pending *sp, int so_locked
3525 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3526     SCTP_UNUSED
3527 #endif
3528 )
3529 {
3530 	struct mbuf *m_notify;
3531 	struct sctp_send_failed *ssf;
3532 	struct sctp_send_failed_event *ssfe;
3533 	struct sctp_queued_to_read *control;
3534 	int notifhdr_len;
3535 
3536 	if ((stcb == NULL) ||
3537 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3538 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3539 		/* event not enabled */
3540 		return;
3541 	}
3542 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3543 		notifhdr_len = sizeof(struct sctp_send_failed_event);
3544 	} else {
3545 		notifhdr_len = sizeof(struct sctp_send_failed);
3546 	}
3547 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3548 	if (m_notify == NULL) {
3549 		/* no space left */
3550 		return;
3551 	}
3552 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3553 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3554 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3555 		memset(ssfe, 0, notifhdr_len);
3556 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3557 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3558 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
3559 		ssfe->ssfe_error = error;
3560 		/* not exactly what the user sent in, but should be close :) */
3561 		ssfe->ssfe_info.snd_sid = sp->sid;
3562 		if (sp->some_taken) {
3563 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3564 		} else {
3565 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3566 		}
3567 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3568 		ssfe->ssfe_info.snd_context = sp->context;
3569 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3570 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3571 	} else {
3572 		ssf = mtod(m_notify, struct sctp_send_failed *);
3573 		memset(ssf, 0, notifhdr_len);
3574 		ssf->ssf_type = SCTP_SEND_FAILED;
3575 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3576 		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
3577 		ssf->ssf_error = error;
3578 		/* not exactly what the user sent in, but should be close :) */
3579 		ssf->ssf_info.sinfo_stream = sp->sid;
3580 		ssf->ssf_info.sinfo_ssn = 0;
3581 		if (sp->some_taken) {
3582 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3583 		} else {
3584 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3585 		}
3586 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3587 		ssf->ssf_info.sinfo_context = sp->context;
3588 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3589 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3590 	}
3591 	SCTP_BUF_NEXT(m_notify) = sp->data;
3592 
3593 	/* Steal off the mbuf */
3594 	sp->data = NULL;
3595 	/*
3596 	 * For this case, we check the actual socket buffer, since the assoc
3597 	 * is going away we don't want to overfill the socket buffer for a
3598 	 * non-reader
3599 	 */
3600 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3601 		sctp_m_freem(m_notify);
3602 		return;
3603 	}
3604 	/* append to socket */
3605 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3606 	    0, 0, stcb->asoc.context, 0, 0, 0,
3607 	    m_notify);
3608 	if (control == NULL) {
3609 		/* no memory */
3610 		sctp_m_freem(m_notify);
3611 		return;
3612 	}
3613 	control->length = SCTP_BUF_LEN(m_notify);
3614 	control->spec_flags = M_NOTIFICATION;
3615 	/* not that we need this */
3616 	control->tail_mbuf = m_notify;
3617 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3618 	    control,
3619 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3620 }
3621 
3622 
3623 
3624 static void
3625 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3626 {
3627 	struct mbuf *m_notify;
3628 	struct sctp_adaptation_event *sai;
3629 	struct sctp_queued_to_read *control;
3630 
3631 	if ((stcb == NULL) ||
3632 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3633 		/* event not enabled */
3634 		return;
3635 	}
3636 
3637 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaptation_event), 0, M_NOWAIT, 1, MT_DATA);
3638 	if (m_notify == NULL)
3639 		/* no space left */
3640 		return;
3641 	SCTP_BUF_LEN(m_notify) = 0;
3642 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3643 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3644 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3645 	sai->sai_flags = 0;
3646 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3647 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3648 	sai->sai_assoc_id = sctp_get_associd(stcb);
3649 
3650 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3651 	SCTP_BUF_NEXT(m_notify) = NULL;
3652 
3653 	/* append to socket */
3654 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3655 	    0, 0, stcb->asoc.context, 0, 0, 0,
3656 	    m_notify);
3657 	if (control == NULL) {
3658 		/* no memory */
3659 		sctp_m_freem(m_notify);
3660 		return;
3661 	}
3662 	control->length = SCTP_BUF_LEN(m_notify);
3663 	control->spec_flags = M_NOTIFICATION;
3664 	/* not that we need this */
3665 	control->tail_mbuf = m_notify;
3666 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3667 	    control,
3668 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3669 }
3670 
3671 /* This always must be called with the read-queue LOCKED in the INP */
3672 static void
3673 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3674     uint32_t val, int so_locked
3675 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3676     SCTP_UNUSED
3677 #endif
3678 )
3679 {
3680 	struct mbuf *m_notify;
3681 	struct sctp_pdapi_event *pdapi;
3682 	struct sctp_queued_to_read *control;
3683 	struct sockbuf *sb;
3684 
3685 	if ((stcb == NULL) ||
3686 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3687 		/* event not enabled */
3688 		return;
3689 	}
3690 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3691 		return;
3692 	}
3693 
3694 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3695 	if (m_notify == NULL)
3696 		/* no space left */
3697 		return;
3698 	SCTP_BUF_LEN(m_notify) = 0;
3699 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3700 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3701 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3702 	pdapi->pdapi_flags = 0;
3703 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3704 	pdapi->pdapi_indication = error;
3705 	pdapi->pdapi_stream = (val >> 16);
3706 	pdapi->pdapi_seq = (val & 0x0000ffff);
3707 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3708 
3709 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3710 	SCTP_BUF_NEXT(m_notify) = NULL;
3711 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3712 	    0, 0, stcb->asoc.context, 0, 0, 0,
3713 	    m_notify);
3714 	if (control == NULL) {
3715 		/* no memory */
3716 		sctp_m_freem(m_notify);
3717 		return;
3718 	}
3719 	control->length = SCTP_BUF_LEN(m_notify);
3720 	control->spec_flags = M_NOTIFICATION;
3721 	/* not that we need this */
3722 	control->tail_mbuf = m_notify;
3723 	sb = &stcb->sctp_socket->so_rcv;
3724 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3725 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3726 	}
3727 	sctp_sballoc(stcb, sb, m_notify);
3728 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3729 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3730 	}
3731 	control->end_added = 1;
3732 	if (stcb->asoc.control_pdapi)
3733 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3734 	else {
3735 		/* we really should not see this case */
3736 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3737 	}
3738 	if (stcb->sctp_ep && stcb->sctp_socket) {
3739 		/* This should always be the case */
3740 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3741 		struct socket *so;
3742 
3743 		so = SCTP_INP_SO(stcb->sctp_ep);
3744 		if (!so_locked) {
3745 			atomic_add_int(&stcb->asoc.refcnt, 1);
3746 			SCTP_TCB_UNLOCK(stcb);
3747 			SCTP_SOCKET_LOCK(so, 1);
3748 			SCTP_TCB_LOCK(stcb);
3749 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3750 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3751 				SCTP_SOCKET_UNLOCK(so, 1);
3752 				return;
3753 			}
3754 		}
3755 #endif
3756 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3757 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3758 		if (!so_locked) {
3759 			SCTP_SOCKET_UNLOCK(so, 1);
3760 		}
3761 #endif
3762 	}
3763 }
3764 
3765 static void
3766 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3767 {
3768 	struct mbuf *m_notify;
3769 	struct sctp_shutdown_event *sse;
3770 	struct sctp_queued_to_read *control;
3771 
3772 	/*
3773 	 * For TCP model AND UDP connected sockets we will send an error up
3774 	 * when a SHUTDOWN completes
3775 	 */
3776 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3777 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3778 		/* mark socket closed for read/write and wakeup! */
3779 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3780 		struct socket *so;
3781 
3782 		so = SCTP_INP_SO(stcb->sctp_ep);
3783 		atomic_add_int(&stcb->asoc.refcnt, 1);
3784 		SCTP_TCB_UNLOCK(stcb);
3785 		SCTP_SOCKET_LOCK(so, 1);
3786 		SCTP_TCB_LOCK(stcb);
3787 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3788 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3789 			SCTP_SOCKET_UNLOCK(so, 1);
3790 			return;
3791 		}
3792 #endif
3793 		socantsendmore(stcb->sctp_socket);
3794 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3795 		SCTP_SOCKET_UNLOCK(so, 1);
3796 #endif
3797 	}
3798 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3799 		/* event not enabled */
3800 		return;
3801 	}
3802 
3803 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3804 	if (m_notify == NULL)
3805 		/* no space left */
3806 		return;
3807 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3808 	memset(sse, 0, sizeof(struct sctp_shutdown_event));
3809 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3810 	sse->sse_flags = 0;
3811 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3812 	sse->sse_assoc_id = sctp_get_associd(stcb);
3813 
3814 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3815 	SCTP_BUF_NEXT(m_notify) = NULL;
3816 
3817 	/* append to socket */
3818 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3819 	    0, 0, stcb->asoc.context, 0, 0, 0,
3820 	    m_notify);
3821 	if (control == NULL) {
3822 		/* no memory */
3823 		sctp_m_freem(m_notify);
3824 		return;
3825 	}
3826 	control->length = SCTP_BUF_LEN(m_notify);
3827 	control->spec_flags = M_NOTIFICATION;
3828 	/* not that we need this */
3829 	control->tail_mbuf = m_notify;
3830 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3831 	    control,
3832 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3833 }
3834 
3835 static void
3836 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3837     int so_locked
3838 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3839     SCTP_UNUSED
3840 #endif
3841 )
3842 {
3843 	struct mbuf *m_notify;
3844 	struct sctp_sender_dry_event *event;
3845 	struct sctp_queued_to_read *control;
3846 
3847 	if ((stcb == NULL) ||
3848 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3849 		/* event not enabled */
3850 		return;
3851 	}
3852 
3853 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3854 	if (m_notify == NULL) {
3855 		/* no space left */
3856 		return;
3857 	}
3858 	SCTP_BUF_LEN(m_notify) = 0;
3859 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3860 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3861 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3862 	event->sender_dry_flags = 0;
3863 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3864 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3865 
3866 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3867 	SCTP_BUF_NEXT(m_notify) = NULL;
3868 
3869 	/* append to socket */
3870 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3871 	    0, 0, stcb->asoc.context, 0, 0, 0,
3872 	    m_notify);
3873 	if (control == NULL) {
3874 		/* no memory */
3875 		sctp_m_freem(m_notify);
3876 		return;
3877 	}
3878 	control->length = SCTP_BUF_LEN(m_notify);
3879 	control->spec_flags = M_NOTIFICATION;
3880 	/* not that we need this */
3881 	control->tail_mbuf = m_notify;
3882 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3883 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3884 }
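
/*
 * Illustrative userland sketch (not part of this file): a common use of
 * the sender-dry notification is to delay close() until everything the
 * application queued has been acknowledged by the peer.  It assumes the
 * socket "sd" is already subscribed to SCTP_SENDER_DRY_EVENT via the
 * SCTP_EVENT option; the helper name wait_until_dry() is hypothetical
 * and messages read while waiting are simply discarded.
 *
 *	static void
 *	wait_until_dry(int sd)
 *	{
 *		char buf[2048];
 *		struct iovec iov = { buf, sizeof(buf) };
 *		struct msghdr msg;
 *
 *		for (;;) {
 *			memset(&msg, 0, sizeof(msg));
 *			msg.msg_iov = &iov;
 *			msg.msg_iovlen = 1;
 *			if (recvmsg(sd, &msg, 0) <= 0)
 *				break;
 *			if ((msg.msg_flags & MSG_NOTIFICATION) &&
 *			    ((union sctp_notification *)buf)->sn_header.sn_type ==
 *			    SCTP_SENDER_DRY_EVENT)
 *				break;
 *		}
 *	}
 */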
3885 
3886 
3887 void
3888 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3889 {
3890 	struct mbuf *m_notify;
3891 	struct sctp_queued_to_read *control;
3892 	struct sctp_stream_change_event *stradd;
3893 
3894 	if ((stcb == NULL) ||
3895 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3896 		/* event not enabled */
3897 		return;
3898 	}
3899 	if ((stcb->asoc.peer_req_out) && flag) {
3900 		/* Peer made the request, don't tell the local user */
3901 		stcb->asoc.peer_req_out = 0;
3902 		return;
3903 	}
3904 	stcb->asoc.peer_req_out = 0;
3905 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3906 	if (m_notify == NULL)
3907 		/* no space left */
3908 		return;
3909 	SCTP_BUF_LEN(m_notify) = 0;
3910 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3911 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3912 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3913 	stradd->strchange_flags = flag;
3914 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3915 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3916 	stradd->strchange_instrms = numberin;
3917 	stradd->strchange_outstrms = numberout;
3918 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3919 	SCTP_BUF_NEXT(m_notify) = NULL;
3920 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3921 		/* no space */
3922 		sctp_m_freem(m_notify);
3923 		return;
3924 	}
3925 	/* append to socket */
3926 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3927 	    0, 0, stcb->asoc.context, 0, 0, 0,
3928 	    m_notify);
3929 	if (control == NULL) {
3930 		/* no memory */
3931 		sctp_m_freem(m_notify);
3932 		return;
3933 	}
3934 	control->length = SCTP_BUF_LEN(m_notify);
3935 	control->spec_flags = M_NOTIFICATION;
3936 	/* not that we need this */
3937 	control->tail_mbuf = m_notify;
3938 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3939 	    control,
3940 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3941 }
3942 
3943 void
3944 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3945 {
3946 	struct mbuf *m_notify;
3947 	struct sctp_queued_to_read *control;
3948 	struct sctp_assoc_reset_event *strasoc;
3949 
3950 	if ((stcb == NULL) ||
3951 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3952 		/* event not enabled */
3953 		return;
3954 	}
3955 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3956 	if (m_notify == NULL)
3957 		/* no space left */
3958 		return;
3959 	SCTP_BUF_LEN(m_notify) = 0;
3960 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3961 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3962 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3963 	strasoc->assocreset_flags = flag;
3964 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3965 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3966 	strasoc->assocreset_local_tsn = sending_tsn;
3967 	strasoc->assocreset_remote_tsn = recv_tsn;
3968 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3969 	SCTP_BUF_NEXT(m_notify) = NULL;
3970 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3971 		/* no space */
3972 		sctp_m_freem(m_notify);
3973 		return;
3974 	}
3975 	/* append to socket */
3976 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3977 	    0, 0, stcb->asoc.context, 0, 0, 0,
3978 	    m_notify);
3979 	if (control == NULL) {
3980 		/* no memory */
3981 		sctp_m_freem(m_notify);
3982 		return;
3983 	}
3984 	control->length = SCTP_BUF_LEN(m_notify);
3985 	control->spec_flags = M_NOTIFICATION;
3986 	/* not that we need this */
3987 	control->tail_mbuf = m_notify;
3988 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3989 	    control,
3990 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3991 }
3992 
3993 
3994 
3995 static void
3996 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3997     int number_entries, uint16_t *list, int flag)
3998 {
3999 	struct mbuf *m_notify;
4000 	struct sctp_queued_to_read *control;
4001 	struct sctp_stream_reset_event *strreset;
4002 	int len;
4003 
4004 	if ((stcb == NULL) ||
4005 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
4006 		/* event not enabled */
4007 		return;
4008 	}
4009 
4010 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
4011 	if (m_notify == NULL)
4012 		/* no space left */
4013 		return;
4014 	SCTP_BUF_LEN(m_notify) = 0;
4015 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
4016 	if (len > M_TRAILINGSPACE(m_notify)) {
4017 		/* never enough room */
4018 		sctp_m_freem(m_notify);
4019 		return;
4020 	}
4021 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
4022 	memset(strreset, 0, len);
4023 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
4024 	strreset->strreset_flags = flag;
4025 	strreset->strreset_length = len;
4026 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
4027 	if (number_entries) {
4028 		int i;
4029 
4030 		for (i = 0; i < number_entries; i++) {
4031 			strreset->strreset_stream_list[i] = ntohs(list[i]);
4032 		}
4033 	}
4034 	SCTP_BUF_LEN(m_notify) = len;
4035 	SCTP_BUF_NEXT(m_notify) = NULL;
4036 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
4037 		/* no space */
4038 		sctp_m_freem(m_notify);
4039 		return;
4040 	}
4041 	/* append to socket */
4042 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
4043 	    0, 0, stcb->asoc.context, 0, 0, 0,
4044 	    m_notify);
4045 	if (control == NULL) {
4046 		/* no memory */
4047 		sctp_m_freem(m_notify);
4048 		return;
4049 	}
4050 	control->length = SCTP_BUF_LEN(m_notify);
4051 	control->spec_flags = M_NOTIFICATION;
4052 	/* not that we need this */
4053 	control->tail_mbuf = m_notify;
4054 	sctp_add_to_readq(stcb->sctp_ep, stcb,
4055 	    control,
4056 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
4057 }
4058 
4059 
4060 static void
4061 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
4062 {
4063 	struct mbuf *m_notify;
4064 	struct sctp_remote_error *sre;
4065 	struct sctp_queued_to_read *control;
4066 	unsigned int notif_len;
4067 	uint16_t chunk_len;
4068 
4069 	if ((stcb == NULL) ||
4070 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
4071 		return;
4072 	}
4073 	if (chunk != NULL) {
4074 		chunk_len = ntohs(chunk->ch.chunk_length);
4075 		/*
4076 		 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
4077 		 * contiguous.
4078 		 */
4079 		if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) {
4080 			chunk_len = SCTP_CHUNK_BUFFER_SIZE;
4081 		}
4082 	} else {
4083 		chunk_len = 0;
4084 	}
4085 	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
4086 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
4087 	if (m_notify == NULL) {
4088 		/* Retry with smaller value. */
4089 		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
4090 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
4091 		if (m_notify == NULL) {
4092 			return;
4093 		}
4094 	}
4095 	SCTP_BUF_NEXT(m_notify) = NULL;
4096 	sre = mtod(m_notify, struct sctp_remote_error *);
4097 	memset(sre, 0, notif_len);
4098 	sre->sre_type = SCTP_REMOTE_ERROR;
4099 	sre->sre_flags = 0;
4100 	sre->sre_length = sizeof(struct sctp_remote_error);
4101 	sre->sre_error = error;
4102 	sre->sre_assoc_id = sctp_get_associd(stcb);
4103 	if (notif_len > sizeof(struct sctp_remote_error)) {
4104 		memcpy(sre->sre_data, chunk, chunk_len);
4105 		sre->sre_length += chunk_len;
4106 	}
4107 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
4108 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
4109 	    0, 0, stcb->asoc.context, 0, 0, 0,
4110 	    m_notify);
4111 	if (control != NULL) {
4112 		control->length = SCTP_BUF_LEN(m_notify);
4113 		control->spec_flags = M_NOTIFICATION;
4114 		/* not that we need this */
4115 		control->tail_mbuf = m_notify;
4116 		sctp_add_to_readq(stcb->sctp_ep, stcb,
4117 		    control,
4118 		    &stcb->sctp_socket->so_rcv, 1,
4119 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
4120 	} else {
4121 		sctp_m_freem(m_notify);
4122 	}
4123 }
4124 
4125 
4126 void
4127 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
4128     uint32_t error, void *data, int so_locked
4129 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4130     SCTP_UNUSED
4131 #endif
4132 )
4133 {
4134 	if ((stcb == NULL) ||
4135 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4136 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4137 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
4138 		/* If the socket is gone we are out of here */
4139 		return;
4140 	}
4141 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
4142 		return;
4143 	}
4144 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
4145 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
4146 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
4147 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
4148 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
4149 			/* Don't report these in front states */
4150 			return;
4151 		}
4152 	}
4153 	switch (notification) {
4154 	case SCTP_NOTIFY_ASSOC_UP:
4155 		if (stcb->asoc.assoc_up_sent == 0) {
4156 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
4157 			stcb->asoc.assoc_up_sent = 1;
4158 		}
4159 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
4160 			sctp_notify_adaptation_layer(stcb);
4161 		}
4162 		if (stcb->asoc.auth_supported == 0) {
4163 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
4164 			    NULL, so_locked);
4165 		}
4166 		break;
4167 	case SCTP_NOTIFY_ASSOC_DOWN:
4168 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
4169 		break;
4170 	case SCTP_NOTIFY_INTERFACE_DOWN:
4171 		{
4172 			struct sctp_nets *net;
4173 
4174 			net = (struct sctp_nets *)data;
4175 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
4176 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
4177 			break;
4178 		}
4179 	case SCTP_NOTIFY_INTERFACE_UP:
4180 		{
4181 			struct sctp_nets *net;
4182 
4183 			net = (struct sctp_nets *)data;
4184 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
4185 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
4186 			break;
4187 		}
4188 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
4189 		{
4190 			struct sctp_nets *net;
4191 
4192 			net = (struct sctp_nets *)data;
4193 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
4194 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
4195 			break;
4196 		}
4197 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
4198 		sctp_notify_send_failed2(stcb, error,
4199 		    (struct sctp_stream_queue_pending *)data, so_locked);
4200 		break;
4201 	case SCTP_NOTIFY_SENT_DG_FAIL:
4202 		sctp_notify_send_failed(stcb, 1, error,
4203 		    (struct sctp_tmit_chunk *)data, so_locked);
4204 		break;
4205 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
4206 		sctp_notify_send_failed(stcb, 0, error,
4207 		    (struct sctp_tmit_chunk *)data, so_locked);
4208 		break;
4209 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
4210 		{
4211 			uint32_t val;
4212 
4213 			val = *((uint32_t *)data);
4214 
4215 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
4216 			break;
4217 		}
4218 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
4219 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
4220 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
4221 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
4222 		} else {
4223 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
4224 		}
4225 		break;
4226 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
4227 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
4228 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
4229 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
4230 		} else {
4231 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
4232 		}
4233 		break;
4234 	case SCTP_NOTIFY_ASSOC_RESTART:
4235 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
4236 		if (stcb->asoc.auth_supported == 0) {
4237 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
4238 			    NULL, so_locked);
4239 		}
4240 		break;
4241 	case SCTP_NOTIFY_STR_RESET_SEND:
4242 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
4243 		break;
4244 	case SCTP_NOTIFY_STR_RESET_RECV:
4245 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
4246 		break;
4247 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
4248 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
4249 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
4250 		break;
4251 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
4252 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
4253 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
4254 		break;
4255 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
4256 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
4257 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
4258 		break;
4259 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
4260 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
4261 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
4262 		break;
4263 	case SCTP_NOTIFY_ASCONF_ADD_IP:
4264 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
4265 		    error, so_locked);
4266 		break;
4267 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
4268 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
4269 		    error, so_locked);
4270 		break;
4271 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
4272 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
4273 		    error, so_locked);
4274 		break;
4275 	case SCTP_NOTIFY_PEER_SHUTDOWN:
4276 		sctp_notify_shutdown_event(stcb);
4277 		break;
4278 	case SCTP_NOTIFY_AUTH_NEW_KEY:
4279 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
4280 		    (uint16_t)(uintptr_t)data,
4281 		    so_locked);
4282 		break;
4283 	case SCTP_NOTIFY_AUTH_FREE_KEY:
4284 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
4285 		    (uint16_t)(uintptr_t)data,
4286 		    so_locked);
4287 		break;
4288 	case SCTP_NOTIFY_NO_PEER_AUTH:
4289 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
4290 		    (uint16_t)(uintptr_t)data,
4291 		    so_locked);
4292 		break;
4293 	case SCTP_NOTIFY_SENDER_DRY:
4294 		sctp_notify_sender_dry_event(stcb, so_locked);
4295 		break;
4296 	case SCTP_NOTIFY_REMOTE_ERROR:
4297 		sctp_notify_remote_error(stcb, error, data);
4298 		break;
4299 	default:
4300 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
4301 		    __func__, notification, notification);
4302 		break;
4303 	}			/* end switch */
4304 }
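
/*
 * Illustrative userland sketch (not part of this file): the switch above
 * is the kernel-side dispatcher; on the application side each queued
 * notification arrives as a regular message with MSG_NOTIFICATION set in
 * msg_flags and is demultiplexed on sn_header.sn_type.  The descriptor
 * name "sd" is an assumption.
 *
 *	char buf[65536];
 *	struct iovec iov = { buf, sizeof(buf) };
 *	struct msghdr msg;
 *	ssize_t n;
 *
 *	memset(&msg, 0, sizeof(msg));
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	n = recvmsg(sd, &msg, 0);
 *	if (n > 0 && (msg.msg_flags & MSG_NOTIFICATION)) {
 *		union sctp_notification *sn = (union sctp_notification *)buf;
 *
 *		switch (sn->sn_header.sn_type) {
 *		case SCTP_ASSOC_CHANGE:
 *			printf("association state changed\n");
 *			break;
 *		case SCTP_PEER_ADDR_CHANGE:
 *			printf("peer address state changed\n");
 *			break;
 *		case SCTP_SENDER_DRY_EVENT:
 *			printf("all queued data acknowledged\n");
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */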
4305 
4306 void
4307 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
4308 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4309     SCTP_UNUSED
4310 #endif
4311 )
4312 {
4313 	struct sctp_association *asoc;
4314 	struct sctp_stream_out *outs;
4315 	struct sctp_tmit_chunk *chk, *nchk;
4316 	struct sctp_stream_queue_pending *sp, *nsp;
4317 	int i;
4318 
4319 	if (stcb == NULL) {
4320 		return;
4321 	}
4322 	asoc = &stcb->asoc;
4323 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
4324 		/* already being freed */
4325 		return;
4326 	}
4327 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4328 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4329 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
4330 		return;
4331 	}
4332 	/* now go through all the gunk, freeing chunks */
4333 	if (holds_lock == 0) {
4334 		SCTP_TCB_SEND_LOCK(stcb);
4335 	}
4336 	/* sent queue SHOULD be empty */
4337 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
4338 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
4339 		asoc->sent_queue_cnt--;
4340 		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
4341 			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
4342 				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
4343 #ifdef INVARIANTS
4344 			} else {
4345 				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
4346 #endif
4347 			}
4348 		}
4349 		if (chk->data != NULL) {
4350 			sctp_free_bufspace(stcb, asoc, chk, 1);
4351 			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
4352 			    error, chk, so_locked);
4353 			if (chk->data) {
4354 				sctp_m_freem(chk->data);
4355 				chk->data = NULL;
4356 			}
4357 		}
4358 		sctp_free_a_chunk(stcb, chk, so_locked);
4359 		/* sa_ignore FREED_MEMORY */
4360 	}
4361 	/* pending send queue SHOULD be empty */
4362 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
4363 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
4364 		asoc->send_queue_cnt--;
4365 		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
4366 			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
4367 #ifdef INVARIANTS
4368 		} else {
4369 			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
4370 #endif
4371 		}
4372 		if (chk->data != NULL) {
4373 			sctp_free_bufspace(stcb, asoc, chk, 1);
4374 			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
4375 			    error, chk, so_locked);
4376 			if (chk->data) {
4377 				sctp_m_freem(chk->data);
4378 				chk->data = NULL;
4379 			}
4380 		}
4381 		sctp_free_a_chunk(stcb, chk, so_locked);
4382 		/* sa_ignore FREED_MEMORY */
4383 	}
4384 	for (i = 0; i < asoc->streamoutcnt; i++) {
4385 		/* For each stream */
4386 		outs = &asoc->strmout[i];
4387 		/* clean up any sends there */
4388 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
4389 			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
4390 			TAILQ_REMOVE(&outs->outqueue, sp, next);
4391 			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1);
4392 			sctp_free_spbufspace(stcb, asoc, sp);
4393 			if (sp->data) {
4394 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
4395 				    error, (void *)sp, so_locked);
4396 				if (sp->data) {
4397 					sctp_m_freem(sp->data);
4398 					sp->data = NULL;
4399 					sp->tail_mbuf = NULL;
4400 					sp->length = 0;
4401 				}
4402 			}
4403 			if (sp->net) {
4404 				sctp_free_remote_addr(sp->net);
4405 				sp->net = NULL;
4406 			}
4407 			/* Free the chunk */
4408 			sctp_free_a_strmoq(stcb, sp, so_locked);
4409 			/* sa_ignore FREED_MEMORY */
4410 		}
4411 	}
4412 
4413 	if (holds_lock == 0) {
4414 		SCTP_TCB_SEND_UNLOCK(stcb);
4415 	}
4416 }
4417 
4418 void
4419 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
4420     struct sctp_abort_chunk *abort, int so_locked
4421 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4422     SCTP_UNUSED
4423 #endif
4424 )
4425 {
4426 	if (stcb == NULL) {
4427 		return;
4428 	}
4429 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
4430 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4431 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
4432 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
4433 	}
4434 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4435 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4436 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
4437 		return;
4438 	}
4439 	/* Tell them we lost the asoc */
4440 	sctp_report_all_outbound(stcb, error, 0, so_locked);
4441 	if (from_peer) {
4442 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
4443 	} else {
4444 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
4445 	}
4446 }
4447 
4448 void
4449 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4450     struct mbuf *m, int iphlen,
4451     struct sockaddr *src, struct sockaddr *dst,
4452     struct sctphdr *sh, struct mbuf *op_err,
4453     uint8_t mflowtype, uint32_t mflowid,
4454     uint32_t vrf_id, uint16_t port)
4455 {
4456 	uint32_t vtag;
4457 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4458 	struct socket *so;
4459 #endif
4460 
4461 	vtag = 0;
4462 	if (stcb != NULL) {
4463 		vtag = stcb->asoc.peer_vtag;
4464 		vrf_id = stcb->asoc.vrf_id;
4465 	}
4466 	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
4467 	    mflowtype, mflowid, inp->fibnum,
4468 	    vrf_id, port);
4469 	if (stcb != NULL) {
4470 		/* We have a TCB to abort, send notification too */
4471 		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
4472 		SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
4473 		/* Ok, now lets free it */
4474 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4475 		so = SCTP_INP_SO(inp);
4476 		atomic_add_int(&stcb->asoc.refcnt, 1);
4477 		SCTP_TCB_UNLOCK(stcb);
4478 		SCTP_SOCKET_LOCK(so, 1);
4479 		SCTP_TCB_LOCK(stcb);
4480 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4481 #endif
4482 		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4483 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4484 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4485 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4486 		}
4487 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4488 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
4489 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4490 		SCTP_SOCKET_UNLOCK(so, 1);
4491 #endif
4492 	}
4493 }
4494 #ifdef SCTP_ASOCLOG_OF_TSNS
4495 void
4496 sctp_print_out_track_log(struct sctp_tcb *stcb)
4497 {
4498 #ifdef NOSIY_PRINTS
4499 	int i;
4500 
4501 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
4502 	SCTP_PRINTF("IN bound TSN log-aaa\n");
4503 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
4504 		SCTP_PRINTF("None rcvd\n");
4505 		goto none_in;
4506 	}
4507 	if (stcb->asoc.tsn_in_wrapped) {
4508 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
4509 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4510 			    stcb->asoc.in_tsnlog[i].tsn,
4511 			    stcb->asoc.in_tsnlog[i].strm,
4512 			    stcb->asoc.in_tsnlog[i].seq,
4513 			    stcb->asoc.in_tsnlog[i].flgs,
4514 			    stcb->asoc.in_tsnlog[i].sz);
4515 		}
4516 	}
4517 	if (stcb->asoc.tsn_in_at) {
4518 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4519 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4520 			    stcb->asoc.in_tsnlog[i].tsn,
4521 			    stcb->asoc.in_tsnlog[i].strm,
4522 			    stcb->asoc.in_tsnlog[i].seq,
4523 			    stcb->asoc.in_tsnlog[i].flgs,
4524 			    stcb->asoc.in_tsnlog[i].sz);
4525 		}
4526 	}
4527 none_in:
4528 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
4529 	if ((stcb->asoc.tsn_out_at == 0) &&
4530 	    (stcb->asoc.tsn_out_wrapped == 0)) {
4531 		SCTP_PRINTF("None sent\n");
4532 	}
4533 	if (stcb->asoc.tsn_out_wrapped) {
4534 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4535 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4536 			    stcb->asoc.out_tsnlog[i].tsn,
4537 			    stcb->asoc.out_tsnlog[i].strm,
4538 			    stcb->asoc.out_tsnlog[i].seq,
4539 			    stcb->asoc.out_tsnlog[i].flgs,
4540 			    stcb->asoc.out_tsnlog[i].sz);
4541 		}
4542 	}
4543 	if (stcb->asoc.tsn_out_at) {
4544 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4545 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4546 			    stcb->asoc.out_tsnlog[i].tsn,
4547 			    stcb->asoc.out_tsnlog[i].strm,
4548 			    stcb->asoc.out_tsnlog[i].seq,
4549 			    stcb->asoc.out_tsnlog[i].flgs,
4550 			    stcb->asoc.out_tsnlog[i].sz);
4551 		}
4552 	}
4553 #endif
4554 }
4555 #endif
4556 
4557 void
4558 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4559     struct mbuf *op_err,
4560     int so_locked
4561 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4562     SCTP_UNUSED
4563 #endif
4564 )
4565 {
4566 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4567 	struct socket *so;
4568 #endif
4569 
4570 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4571 	so = SCTP_INP_SO(inp);
4572 #endif
4573 	if (stcb == NULL) {
4574 		/* Got to have a TCB */
4575 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4576 			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4577 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4578 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
4579 			}
4580 		}
4581 		return;
4582 	} else {
4583 		SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
4584 	}
4585 	/* notify the peer */
4586 	sctp_send_abort_tcb(stcb, op_err, so_locked);
4587 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4588 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4589 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4590 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4591 	}
4592 	/* notify the ulp */
4593 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4594 		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4595 	}
4596 	/* now free the asoc */
4597 #ifdef SCTP_ASOCLOG_OF_TSNS
4598 	sctp_print_out_track_log(stcb);
4599 #endif
4600 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4601 	if (!so_locked) {
4602 		atomic_add_int(&stcb->asoc.refcnt, 1);
4603 		SCTP_TCB_UNLOCK(stcb);
4604 		SCTP_SOCKET_LOCK(so, 1);
4605 		SCTP_TCB_LOCK(stcb);
4606 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4607 	}
4608 #endif
4609 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4610 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4611 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4612 	if (!so_locked) {
4613 		SCTP_SOCKET_UNLOCK(so, 1);
4614 	}
4615 #endif
4616 }
4617 
4618 void
4619 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4620     struct sockaddr *src, struct sockaddr *dst,
4621     struct sctphdr *sh, struct sctp_inpcb *inp,
4622     struct mbuf *cause,
4623     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4624     uint32_t vrf_id, uint16_t port)
4625 {
4626 	struct sctp_chunkhdr *ch, chunk_buf;
4627 	unsigned int chk_length;
4628 	int contains_init_chunk;
4629 
4630 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4631 	/* Generate a TO address for future reference */
4632 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4633 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4634 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4635 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4636 		}
4637 	}
4638 	contains_init_chunk = 0;
4639 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4640 	    sizeof(*ch), (uint8_t *)&chunk_buf);
4641 	while (ch != NULL) {
4642 		chk_length = ntohs(ch->chunk_length);
4643 		if (chk_length < sizeof(*ch)) {
4644 			/* break to abort land */
4645 			break;
4646 		}
4647 		switch (ch->chunk_type) {
4648 		case SCTP_INIT:
4649 			contains_init_chunk = 1;
4650 			break;
4651 		case SCTP_PACKET_DROPPED:
4652 			/* we don't respond to pkt-dropped */
4653 			return;
4654 		case SCTP_ABORT_ASSOCIATION:
4655 			/* we don't respond with an ABORT to an ABORT */
4656 			return;
4657 		case SCTP_SHUTDOWN_COMPLETE:
4658 			/*
4659 			 * we ignore it since we are not waiting for it and
4660 			 * peer is gone
4661 			 */
4662 			return;
4663 		case SCTP_SHUTDOWN_ACK:
4664 			sctp_send_shutdown_complete2(src, dst, sh,
4665 			    mflowtype, mflowid, fibnum,
4666 			    vrf_id, port);
4667 			return;
4668 		default:
4669 			break;
4670 		}
4671 		offset += SCTP_SIZE32(chk_length);
4672 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4673 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4674 	}
4675 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4676 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4677 	    (contains_init_chunk == 0))) {
4678 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4679 		    mflowtype, mflowid, fibnum,
4680 		    vrf_id, port);
4681 	}
4682 }
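
/*
 * Illustrative userland sketch (not part of this file): whether the
 * ABORT at the end of sctp_handle_ootb() is actually sent is governed
 * by the net.inet.sctp.blackhole sysctl (0 = always respond, 1 = stay
 * silent only for out-of-the-blue packets containing an INIT, 2 = never
 * respond).  The value can be read with sysctlbyname(3); error handling
 * is omitted.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *
 *	uint32_t blackhole;
 *	size_t len = sizeof(blackhole);
 *
 *	if (sysctlbyname("net.inet.sctp.blackhole", &blackhole, &len,
 *	    NULL, 0) == 0)
 *		printf("sctp blackhole mode: %u\n", blackhole);
 */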
4683 
4684 /*
4685  * check the inbound datagram to make sure there is not an abort inside it,
4686  * if there is return 1, else return 0.
4687  */
4688 int
4689 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
4690 {
4691 	struct sctp_chunkhdr *ch;
4692 	struct sctp_init_chunk *init_chk, chunk_buf;
4693 	int offset;
4694 	unsigned int chk_length;
4695 
4696 	offset = iphlen + sizeof(struct sctphdr);
4697 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4698 	    (uint8_t *)&chunk_buf);
4699 	while (ch != NULL) {
4700 		chk_length = ntohs(ch->chunk_length);
4701 		if (chk_length < sizeof(*ch)) {
4702 			/* packet is probably corrupt */
4703 			break;
4704 		}
4705 		/* we seem to be ok, is it an abort? */
4706 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4707 			/* yep, tell them */
4708 			return (1);
4709 		}
4710 		if (ch->chunk_type == SCTP_INITIATION) {
4711 			/* need to update the Vtag */
4712 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4713 			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
4714 			if (init_chk != NULL) {
4715 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4716 			}
4717 		}
4718 		/* Nope, move to the next chunk */
4719 		offset += SCTP_SIZE32(chk_length);
4720 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4721 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4722 	}
4723 	return (0);
4724 }
4725 
4726 /*
4727  * currently (2/02), ifa_addr embeds scope_id's and doesn't have sin6_scope_id
4728  * set (i.e. it's 0), so create this function to compare link-local scopes
4729  */
4730 #ifdef INET6
4731 uint32_t
4732 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4733 {
4734 	struct sockaddr_in6 a, b;
4735 
4736 	/* save copies */
4737 	a = *addr1;
4738 	b = *addr2;
4739 
4740 	if (a.sin6_scope_id == 0)
4741 		if (sa6_recoverscope(&a)) {
4742 			/* can't get scope, so can't match */
4743 			return (0);
4744 		}
4745 	if (b.sin6_scope_id == 0)
4746 		if (sa6_recoverscope(&b)) {
4747 			/* can't get scope, so can't match */
4748 			return (0);
4749 		}
4750 	if (a.sin6_scope_id != b.sin6_scope_id)
4751 		return (0);
4752 
4753 	return (1);
4754 }
4755 
4756 /*
4757  * returns a sockaddr_in6 with embedded scope recovered and removed
4758  */
4759 struct sockaddr_in6 *
4760 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4761 {
4762 	/* check and strip embedded scope junk */
4763 	if (addr->sin6_family == AF_INET6) {
4764 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4765 			if (addr->sin6_scope_id == 0) {
4766 				*store = *addr;
4767 				if (!sa6_recoverscope(store)) {
4768 					/* use the recovered scope */
4769 					addr = store;
4770 				}
4771 			} else {
4772 				/* else, return the original "to" addr */
4773 				in6_clearscope(&addr->sin6_addr);
4774 			}
4775 		}
4776 	}
4777 	return (addr);
4778 }
4779 #endif
4780 
4781 /*
4782  * are the two addresses the same? (currently a "scopeless" check) returns 1
4783  * if same, 0 if not
4784  */
4785 int
4786 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4787 {
4788 
4789 	/* must be valid */
4790 	if (sa1 == NULL || sa2 == NULL)
4791 		return (0);
4792 
4793 	/* must be the same family */
4794 	if (sa1->sa_family != sa2->sa_family)
4795 		return (0);
4796 
4797 	switch (sa1->sa_family) {
4798 #ifdef INET6
4799 	case AF_INET6:
4800 		{
4801 			/* IPv6 addresses */
4802 			struct sockaddr_in6 *sin6_1, *sin6_2;
4803 
4804 			sin6_1 = (struct sockaddr_in6 *)sa1;
4805 			sin6_2 = (struct sockaddr_in6 *)sa2;
4806 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4807 			    sin6_2));
4808 		}
4809 #endif
4810 #ifdef INET
4811 	case AF_INET:
4812 		{
4813 			/* IPv4 addresses */
4814 			struct sockaddr_in *sin_1, *sin_2;
4815 
4816 			sin_1 = (struct sockaddr_in *)sa1;
4817 			sin_2 = (struct sockaddr_in *)sa2;
4818 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4819 		}
4820 #endif
4821 	default:
4822 		/* we don't do these... */
4823 		return (0);
4824 	}
4825 }
4826 
4827 void
4828 sctp_print_address(struct sockaddr *sa)
4829 {
4830 #ifdef INET6
4831 	char ip6buf[INET6_ADDRSTRLEN];
4832 #endif
4833 
4834 	switch (sa->sa_family) {
4835 #ifdef INET6
4836 	case AF_INET6:
4837 		{
4838 			struct sockaddr_in6 *sin6;
4839 
4840 			sin6 = (struct sockaddr_in6 *)sa;
4841 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4842 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4843 			    ntohs(sin6->sin6_port),
4844 			    sin6->sin6_scope_id);
4845 			break;
4846 		}
4847 #endif
4848 #ifdef INET
4849 	case AF_INET:
4850 		{
4851 			struct sockaddr_in *sin;
4852 			unsigned char *p;
4853 
4854 			sin = (struct sockaddr_in *)sa;
4855 			p = (unsigned char *)&sin->sin_addr;
4856 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4857 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4858 			break;
4859 		}
4860 #endif
4861 	default:
4862 		SCTP_PRINTF("?\n");
4863 		break;
4864 	}
4865 }
4866 
4867 void
4868 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4869     struct sctp_inpcb *new_inp,
4870     struct sctp_tcb *stcb,
4871     int waitflags)
4872 {
4873 	/*
4874 	 * go through our old INP and pull off any control structures that
4875 	 * belong to stcb and move them to the new inp.
4876 	 */
4877 	struct socket *old_so, *new_so;
4878 	struct sctp_queued_to_read *control, *nctl;
4879 	struct sctp_readhead tmp_queue;
4880 	struct mbuf *m;
4881 	int error = 0;
4882 
4883 	old_so = old_inp->sctp_socket;
4884 	new_so = new_inp->sctp_socket;
4885 	TAILQ_INIT(&tmp_queue);
4886 	error = sblock(&old_so->so_rcv, waitflags);
4887 	if (error) {
4888 		/*
4889 		 * Gak, can't get sblock, we have a problem. Data will be
4890 		 * left stranded, and we don't dare look at it since the
4891 		 * other thread may be reading something. Oh well, it's a
4892 		 * screwed up app that does a peeloff OR an accept while
4893 		 * reading from the main socket... actually it's only the
4894 		 * peeloff() case, since I think read will fail on a
4895 		 * listening socket.
4896 		 */
4897 		return;
4898 	}
4899 	/* lock the socket buffers */
4900 	SCTP_INP_READ_LOCK(old_inp);
4901 	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4902 		/* Pull off all for our target stcb */
4903 		if (control->stcb == stcb) {
4904 			/* remove it we want it */
4905 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4906 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4907 			m = control->data;
4908 			while (m) {
4909 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4910 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4911 				}
4912 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4913 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4914 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4915 				}
4916 				m = SCTP_BUF_NEXT(m);
4917 			}
4918 		}
4919 	}
4920 	SCTP_INP_READ_UNLOCK(old_inp);
4921 	/* Remove the sb-lock on the old socket */
4922 
4923 	sbunlock(&old_so->so_rcv);
4924 	/* Now we move them over to the new socket buffer */
4925 	SCTP_INP_READ_LOCK(new_inp);
4926 	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4927 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4928 		m = control->data;
4929 		while (m) {
4930 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4931 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4932 			}
4933 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4934 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4935 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4936 			}
4937 			m = SCTP_BUF_NEXT(m);
4938 		}
4939 	}
4940 	SCTP_INP_READ_UNLOCK(new_inp);
4941 }
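
/*
 * Illustrative userland sketch (not part of this file): this migration
 * runs when an association is peeled off (or accepted) onto its own
 * socket, so data already queued on the one-to-many socket follows the
 * association.  The matching application call is sctp_peeloff(2); "sd",
 * "assoc_id" (taken from an earlier SCTP_ASSOC_CHANGE notification) and
 * handle_association() are assumptions.
 *
 *	int peeled;
 *
 *	peeled = sctp_peeloff(sd, assoc_id);
 *	if (peeled < 0)
 *		perror("sctp_peeloff");
 *	else
 *		handle_association(peeled);
 */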
4942 
4943 void
4944 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
4945     struct sctp_tcb *stcb,
4946     int so_locked
4947 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4948     SCTP_UNUSED
4949 #endif
4950 )
4951 {
4952 	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
4953 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4954 		struct socket *so;
4955 
4956 		so = SCTP_INP_SO(inp);
4957 		if (!so_locked) {
4958 			if (stcb) {
4959 				atomic_add_int(&stcb->asoc.refcnt, 1);
4960 				SCTP_TCB_UNLOCK(stcb);
4961 			}
4962 			SCTP_SOCKET_LOCK(so, 1);
4963 			if (stcb) {
4964 				SCTP_TCB_LOCK(stcb);
4965 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4966 			}
4967 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4968 				SCTP_SOCKET_UNLOCK(so, 1);
4969 				return;
4970 			}
4971 		}
4972 #endif
4973 		sctp_sorwakeup(inp, inp->sctp_socket);
4974 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4975 		if (!so_locked) {
4976 			SCTP_SOCKET_UNLOCK(so, 1);
4977 		}
4978 #endif
4979 	}
4980 }
4981 
4982 void
4983 sctp_add_to_readq(struct sctp_inpcb *inp,
4984     struct sctp_tcb *stcb,
4985     struct sctp_queued_to_read *control,
4986     struct sockbuf *sb,
4987     int end,
4988     int inp_read_lock_held,
4989     int so_locked
4990 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4991     SCTP_UNUSED
4992 #endif
4993 )
4994 {
4995 	/*
4996 	 * Here we must place the control on the end of the socket read
4997 	 * queue AND increment sb_cc so that select will work properly on
4998 	 * read.
4999 	 */
5000 	struct mbuf *m, *prev = NULL;
5001 
5002 	if (inp == NULL) {
5003 		/* Gak, TSNH!! */
5004 #ifdef INVARIANTS
5005 		panic("Gak, inp NULL on add_to_readq");
5006 #endif
5007 		return;
5008 	}
5009 	if (inp_read_lock_held == 0)
5010 		SCTP_INP_READ_LOCK(inp);
5011 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
5012 		if (!control->on_strm_q) {
5013 			sctp_free_remote_addr(control->whoFrom);
5014 			if (control->data) {
5015 				sctp_m_freem(control->data);
5016 				control->data = NULL;
5017 			}
5018 			sctp_free_a_readq(stcb, control);
5019 		}
5020 		if (inp_read_lock_held == 0)
5021 			SCTP_INP_READ_UNLOCK(inp);
5022 		return;
5023 	}
5024 	if (!(control->spec_flags & M_NOTIFICATION)) {
5025 		atomic_add_int(&inp->total_recvs, 1);
5026 		if (!control->do_not_ref_stcb) {
5027 			atomic_add_int(&stcb->total_recvs, 1);
5028 		}
5029 	}
5030 	m = control->data;
5031 	control->held_length = 0;
5032 	control->length = 0;
5033 	while (m) {
5034 		if (SCTP_BUF_LEN(m) == 0) {
5035 			/* Skip mbufs with NO length */
5036 			if (prev == NULL) {
5037 				/* First one */
5038 				control->data = sctp_m_free(m);
5039 				m = control->data;
5040 			} else {
5041 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
5042 				m = SCTP_BUF_NEXT(prev);
5043 			}
5044 			if (m == NULL) {
5045 				control->tail_mbuf = prev;
5046 			}
5047 			continue;
5048 		}
5049 		prev = m;
5050 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5051 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
5052 		}
5053 		sctp_sballoc(stcb, sb, m);
5054 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5055 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5056 		}
5057 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
5058 		m = SCTP_BUF_NEXT(m);
5059 	}
5060 	if (prev != NULL) {
5061 		control->tail_mbuf = prev;
5062 	} else {
5063 		/* Everything got collapsed out?? */
5064 		if (!control->on_strm_q) {
5065 			sctp_free_remote_addr(control->whoFrom);
5066 			sctp_free_a_readq(stcb, control);
5067 		}
5068 		if (inp_read_lock_held == 0)
5069 			SCTP_INP_READ_UNLOCK(inp);
5070 		return;
5071 	}
5072 	if (end) {
5073 		control->end_added = 1;
5074 	}
5075 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
5076 	control->on_read_q = 1;
5077 	if (inp_read_lock_held == 0)
5078 		SCTP_INP_READ_UNLOCK(inp);
5079 	if (inp && inp->sctp_socket) {
5080 		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
5081 	}
5082 }
5083 
5084 /*************HOLD THIS COMMENT FOR PATCH FILE OF
5085  *************ALTERNATE ROUTING CODE
5086  */
5087 
5088 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
5089  *************ALTERNATE ROUTING CODE
5090  */
5091 
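/*
 * Build an error cause (struct sctp_gen_error_cause) carrying the given
 * cause code and the string "info" as cause-specific information, in a
 * freshly allocated mbuf.  Returns NULL on bad input or allocation failure.
 *
 * Hypothetical caller sketch (not taken from this file; it assumes the
 * cause-code constant and sctp_queue_op_err() as used elsewhere in this
 * stack):
 *
 *	char msg[SCTP_DIAG_INFO_LEN];
 *	struct mbuf *op_err;
 *
 *	snprintf(msg, sizeof(msg), "DATA chunk of length %d", chk_length);
 *	op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
 *	if (op_err != NULL)
 *		sctp_queue_op_err(stcb, op_err);
 */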
5092 struct mbuf *
5093 sctp_generate_cause(uint16_t code, char *info)
5094 {
5095 	struct mbuf *m;
5096 	struct sctp_gen_error_cause *cause;
5097 	size_t info_len;
5098 	uint16_t len;
5099 
5100 	if ((code == 0) || (info == NULL)) {
5101 		return (NULL);
5102 	}
5103 	info_len = strlen(info);
5104 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
5105 		return (NULL);
5106 	}
5107 	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
5108 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
5109 	if (m != NULL) {
5110 		SCTP_BUF_LEN(m) = len;
5111 		cause = mtod(m, struct sctp_gen_error_cause *);
5112 		cause->code = htons(code);
5113 		cause->length = htons(len);
5114 		memcpy(cause->info, info, info_len);
5115 	}
5116 	return (m);
5117 }
5118 
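/*
 * Build a "No User Data" error cause (RFC 4960, cause code 9) naming the
 * TSN of the DATA chunk that arrived without any user data.  Returns NULL
 * if no mbuf could be allocated.
 */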
5119 struct mbuf *
5120 sctp_generate_no_user_data_cause(uint32_t tsn)
5121 {
5122 	struct mbuf *m;
5123 	struct sctp_error_no_user_data *no_user_data_cause;
5124 	uint16_t len;
5125 
5126 	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
5127 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
5128 	if (m != NULL) {
5129 		SCTP_BUF_LEN(m) = len;
5130 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
5131 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
5132 		no_user_data_cause->cause.length = htons(len);
5133 		no_user_data_cause->tsn = htonl(tsn);
5134 	}
5135 	return (m);
5136 }
5137 
5138 #ifdef SCTP_MBCNT_LOGGING
5139 void
5140 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
5141     struct sctp_tmit_chunk *tp1, int chk_cnt)
5142 {
5143 	if (tp1->data == NULL) {
5144 		return;
5145 	}
5146 	asoc->chunks_on_out_queue -= chk_cnt;
5147 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
5148 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
5149 		    asoc->total_output_queue_size,
5150 		    tp1->book_size,
5151 		    0,
5152 		    tp1->mbcnt);
5153 	}
5154 	if (asoc->total_output_queue_size >= tp1->book_size) {
5155 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
5156 	} else {
5157 		asoc->total_output_queue_size = 0;
5158 	}
5159 
5160 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
5161 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
5162 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
5163 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
5164 		} else {
5165 			stcb->sctp_socket->so_snd.sb_cc = 0;
5166 
5167 		}
5168 	}
5169 }
5170 
5171 #endif
5172 
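/*
 * Abandon a (possibly fragmented) PR-SCTP message: every chunk belonging to
 * the message identified by tp1 is marked SCTP_FORWARD_TSN_SKIP so a later
 * FORWARD-TSN can cover it, its data is freed and the ULP is notified via
 * SCTP_NOTIFY_SENT_DG_FAIL / SCTP_NOTIFY_UNSENT_DG_FAIL.  Any remaining
 * fragments still sitting on the stream out queue are discarded as well.
 * Returns the number of bytes released.
 */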
5173 int
5174 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
5175     uint8_t sent, int so_locked
5176 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
5177     SCTP_UNUSED
5178 #endif
5179 )
5180 {
5181 	struct sctp_stream_out *strq;
5182 	struct sctp_tmit_chunk *chk = NULL, *tp2;
5183 	struct sctp_stream_queue_pending *sp;
5184 	uint32_t mid;
5185 	uint16_t sid;
5186 	uint8_t foundeom = 0;
5187 	int ret_sz = 0;
5188 	int notdone;
5189 	int do_wakeup_routine = 0;
5190 
5191 	sid = tp1->rec.data.sid;
5192 	mid = tp1->rec.data.mid;
5193 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
5194 		stcb->asoc.abandoned_sent[0]++;
5195 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
5196 		stcb->asoc.strmout[sid].abandoned_sent[0]++;
5197 #if defined(SCTP_DETAILED_STR_STATS)
5198 		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
5199 #endif
5200 	} else {
5201 		stcb->asoc.abandoned_unsent[0]++;
5202 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
5203 		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
5204 #if defined(SCTP_DETAILED_STR_STATS)
5205 		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
5206 #endif
5207 	}
5208 	do {
5209 		ret_sz += tp1->book_size;
5210 		if (tp1->data != NULL) {
5211 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5212 				sctp_flight_size_decrease(tp1);
5213 				sctp_total_flight_decrease(stcb, tp1);
5214 			}
5215 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
5216 			stcb->asoc.peers_rwnd += tp1->send_size;
5217 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
5218 			if (sent) {
5219 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
5220 			} else {
5221 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
5222 			}
5223 			if (tp1->data) {
5224 				sctp_m_freem(tp1->data);
5225 				tp1->data = NULL;
5226 			}
5227 			do_wakeup_routine = 1;
5228 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
5229 				stcb->asoc.sent_queue_cnt_removeable--;
5230 			}
5231 		}
5232 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
5233 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
5234 		    SCTP_DATA_NOT_FRAG) {
5235 			/* not frag'ed, we are done */
5236 			notdone = 0;
5237 			foundeom = 1;
5238 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
5239 			/* end of frag, we are done */
5240 			notdone = 0;
5241 			foundeom = 1;
5242 		} else {
5243 			/*
5244 			 * It's a begin or middle piece, we must mark all of
5245 			 * it
5246 			 */
5247 			notdone = 1;
5248 			tp1 = TAILQ_NEXT(tp1, sctp_next);
5249 		}
5250 	} while (tp1 && notdone);
5251 	if (foundeom == 0) {
5252 		/*
5253 		 * The multi-part message was scattered across the send and
5254 		 * sent queue.
5255 		 */
5256 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
5257 			if ((tp1->rec.data.sid != sid) ||
5258 			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
5259 				break;
5260 			}
5261 			/*
5262 			 * Save to chk in case we have some on the stream out
5263 			 * queue. If so, and we have an un-transmitted one, we
5264 			 * don't have to fudge the TSN.
5265 			 */
5266 			chk = tp1;
5267 			ret_sz += tp1->book_size;
5268 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
5269 			if (sent) {
5270 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
5271 			} else {
5272 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
5273 			}
5274 			if (tp1->data) {
5275 				sctp_m_freem(tp1->data);
5276 				tp1->data = NULL;
5277 			}
5278 			/* No flight involved here; book the size to 0 */
5279 			tp1->book_size = 0;
5280 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
5281 				foundeom = 1;
5282 			}
5283 			do_wakeup_routine = 1;
5284 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
5285 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
5286 			/*
5287 			 * Move it on to the sent queue so we can wait for it to be
5288 			 * passed by.
5289 			 */
5290 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
5291 			    sctp_next);
5292 			stcb->asoc.send_queue_cnt--;
5293 			stcb->asoc.sent_queue_cnt++;
5294 		}
5295 	}
5296 	if (foundeom == 0) {
5297 		/*
5298 		 * Still no eom found. That means there is stuff left on the
5299 		 * stream out queue.. yuck.
5300 		 */
5301 		SCTP_TCB_SEND_LOCK(stcb);
5302 		strq = &stcb->asoc.strmout[sid];
5303 		sp = TAILQ_FIRST(&strq->outqueue);
5304 		if (sp != NULL) {
5305 			sp->discard_rest = 1;
5306 			/*
5307 			 * We may need to put a chunk on the queue that
5308 			 * holds the TSN that would have been sent with the
5309 			 * LAST bit.
5310 			 */
5311 			if (chk == NULL) {
5312 				/* Yep, we have to */
5313 				sctp_alloc_a_chunk(stcb, chk);
5314 				if (chk == NULL) {
5315 					/*
5316 					 * we are hosed. All we can do is
5317 					 * nothing.. which will cause an
5318 					 * abort if the peer is paying
5319 					 * attention.
5320 					 */
5321 					goto oh_well;
5322 				}
5323 				memset(chk, 0, sizeof(*chk));
5324 				chk->rec.data.rcv_flags = 0;
5325 				chk->sent = SCTP_FORWARD_TSN_SKIP;
5326 				chk->asoc = &stcb->asoc;
5327 				if (stcb->asoc.idata_supported == 0) {
5328 					if (sp->sinfo_flags & SCTP_UNORDERED) {
5329 						chk->rec.data.mid = 0;
5330 					} else {
5331 						chk->rec.data.mid = strq->next_mid_ordered;
5332 					}
5333 				} else {
5334 					if (sp->sinfo_flags & SCTP_UNORDERED) {
5335 						chk->rec.data.mid = strq->next_mid_unordered;
5336 					} else {
5337 						chk->rec.data.mid = strq->next_mid_ordered;
5338 					}
5339 				}
5340 				chk->rec.data.sid = sp->sid;
5341 				chk->rec.data.ppid = sp->ppid;
5342 				chk->rec.data.context = sp->context;
5343 				chk->flags = sp->act_flags;
5344 				chk->whoTo = NULL;
5345 				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
5346 				strq->chunks_on_queues++;
5347 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
5348 				stcb->asoc.sent_queue_cnt++;
5349 				stcb->asoc.pr_sctp_cnt++;
5350 			}
5351 			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
5352 			if (sp->sinfo_flags & SCTP_UNORDERED) {
5353 				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
5354 			}
5355 			if (stcb->asoc.idata_supported == 0) {
5356 				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
5357 					strq->next_mid_ordered++;
5358 				}
5359 			} else {
5360 				if (sp->sinfo_flags & SCTP_UNORDERED) {
5361 					strq->next_mid_unordered++;
5362 				} else {
5363 					strq->next_mid_ordered++;
5364 				}
5365 			}
5366 	oh_well:
5367 			if (sp->data) {
5368 				/*
5369 				 * Pull any data to free up the SB and allow
5370 				 * the sender to "add more" while we throw it
5371 				 * away :-)
5372 				 */
5373 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
5374 				ret_sz += sp->length;
5375 				do_wakeup_routine = 1;
5376 				sp->some_taken = 1;
5377 				sctp_m_freem(sp->data);
5378 				sp->data = NULL;
5379 				sp->tail_mbuf = NULL;
5380 				sp->length = 0;
5381 			}
5382 		}
5383 		SCTP_TCB_SEND_UNLOCK(stcb);
5384 	}
5385 	if (do_wakeup_routine) {
5386 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5387 		struct socket *so;
5388 
5389 		so = SCTP_INP_SO(stcb->sctp_ep);
5390 		if (!so_locked) {
5391 			atomic_add_int(&stcb->asoc.refcnt, 1);
5392 			SCTP_TCB_UNLOCK(stcb);
5393 			SCTP_SOCKET_LOCK(so, 1);
5394 			SCTP_TCB_LOCK(stcb);
5395 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
5396 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
5397 				/* assoc was freed while we were unlocked */
5398 				SCTP_SOCKET_UNLOCK(so, 1);
5399 				return (ret_sz);
5400 			}
5401 		}
5402 #endif
5403 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
5404 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5405 		if (!so_locked) {
5406 			SCTP_SOCKET_UNLOCK(so, 1);
5407 		}
5408 #endif
5409 	}
5410 	return (ret_sz);
5411 }
5412 
5413 /*
5414  * Checks to see if the given address, sa, is one that is currently known by
5415  * the kernel. Note: can't distinguish the same address on multiple interfaces
5416  * and doesn't handle multiple addresses with different zone/scope ids. Note:
5417  * ifa_ifwithaddr() compares the entire sockaddr struct.
5418  */
5419 struct sctp_ifa *
5420 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
5421     int holds_lock)
5422 {
5423 	struct sctp_laddr *laddr;
5424 
5425 	if (holds_lock == 0) {
5426 		SCTP_INP_RLOCK(inp);
5427 	}
5428 
5429 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
5430 		if (laddr->ifa == NULL)
5431 			continue;
5432 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
5433 			continue;
5434 #ifdef INET
5435 		if (addr->sa_family == AF_INET) {
5436 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5437 			    laddr->ifa->address.sin.sin_addr.s_addr) {
5438 				/* found him. */
5439 				if (holds_lock == 0) {
5440 					SCTP_INP_RUNLOCK(inp);
5441 				}
5442 				return (laddr->ifa);
5443 				break;
5444 			}
5445 		}
5446 #endif
5447 #ifdef INET6
5448 		if (addr->sa_family == AF_INET6) {
5449 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5450 			    &laddr->ifa->address.sin6)) {
5451 				/* found him. */
5452 				if (holds_lock == 0) {
5453 					SCTP_INP_RUNLOCK(inp);
5454 				}
5455 				return (laddr->ifa);
5456 				break;
5457 			}
5458 		}
5459 #endif
5460 	}
5461 	if (holds_lock == 0) {
5462 		SCTP_INP_RUNLOCK(inp);
5463 	}
5464 	return (NULL);
5465 }
5466 
5467 uint32_t
5468 sctp_get_ifa_hash_val(struct sockaddr *addr)
5469 {
5470 	switch (addr->sa_family) {
5471 #ifdef INET
5472 	case AF_INET:
5473 		{
5474 			struct sockaddr_in *sin;
5475 
5476 			sin = (struct sockaddr_in *)addr;
5477 			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
5478 		}
5479 #endif
5480 #ifdef INET6
5481 	case AF_INET6:
5482 		{
5483 			struct sockaddr_in6 *sin6;
5484 			uint32_t hash_of_addr;
5485 
5486 			sin6 = (struct sockaddr_in6 *)addr;
5487 			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
5488 			    sin6->sin6_addr.s6_addr32[1] +
5489 			    sin6->sin6_addr.s6_addr32[2] +
5490 			    sin6->sin6_addr.s6_addr32[3]);
5491 			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
5492 			return (hash_of_addr);
5493 		}
5494 #endif
5495 	default:
5496 		break;
5497 	}
5498 	return (0);
5499 }
5500 
5501 struct sctp_ifa *
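/*
 * Look addr up in the global VRF address hash (as opposed to an endpoint's
 * bound-address list, which sctp_find_ifa_in_ep() above searches).  The
 * bucket is selected as sctp_get_ifa_hash_val(addr) & vrf_addr_hashmark,
 * as done below.
 */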
5502 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5503 {
5504 	struct sctp_ifa *sctp_ifap;
5505 	struct sctp_vrf *vrf;
5506 	struct sctp_ifalist *hash_head;
5507 	uint32_t hash_of_addr;
5508 
5509 	if (holds_lock == 0)
5510 		SCTP_IPI_ADDR_RLOCK();
5511 
5512 	vrf = sctp_find_vrf(vrf_id);
5513 	if (vrf == NULL) {
5514 		if (holds_lock == 0)
5515 			SCTP_IPI_ADDR_RUNLOCK();
5516 		return (NULL);
5517 	}
5518 
5519 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5520 
5521 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5522 	if (hash_head == NULL) {
5523 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5524 		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5525 		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5526 		sctp_print_address(addr);
5527 		SCTP_PRINTF("No such bucket for address\n");
5528 		if (holds_lock == 0)
5529 			SCTP_IPI_ADDR_RUNLOCK();
5530 
5531 		return (NULL);
5532 	}
5533 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5534 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5535 			continue;
5536 #ifdef INET
5537 		if (addr->sa_family == AF_INET) {
5538 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5539 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5540 				/* found him. */
5541 				if (holds_lock == 0)
5542 					SCTP_IPI_ADDR_RUNLOCK();
5543 				return (sctp_ifap);
5544 				break;
5545 			}
5546 		}
5547 #endif
5548 #ifdef INET6
5549 		if (addr->sa_family == AF_INET6) {
5550 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5551 			    &sctp_ifap->address.sin6)) {
5552 				/* found him. */
5553 				if (holds_lock == 0)
5554 					SCTP_IPI_ADDR_RUNLOCK();
5555 				return (sctp_ifap);
5556 				break;
5557 			}
5558 		}
5559 #endif
5560 	}
5561 	if (holds_lock == 0)
5562 		SCTP_IPI_ADDR_RUNLOCK();
5563 	return (NULL);
5564 }
5565 
5566 static void
5567 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
5568     uint32_t rwnd_req)
5569 {
5570 	/* User pulled some data, do we need a rwnd update? */
5571 	struct epoch_tracker et;
5572 	int r_unlocked = 0;
5573 	uint32_t dif, rwnd;
5574 	struct socket *so = NULL;
5575 
5576 	if (stcb == NULL)
5577 		return;
5578 
5579 	atomic_add_int(&stcb->asoc.refcnt, 1);
5580 
5581 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
5582 	    (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) {
5583 		/* Pre-check: if we are freeing, no update */
5584 		goto no_lock;
5585 	}
5586 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5587 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5588 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5589 		goto out;
5590 	}
5591 	so = stcb->sctp_socket;
5592 	if (so == NULL) {
5593 		goto out;
5594 	}
5595 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5596 	/* Have you freed enough to look? */
5597 	*freed_so_far = 0;
5598 	/* Yep, it's worth a look and the lock overhead */
5599 
5600 	/* Figure out what the rwnd would be */
5601 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5602 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5603 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5604 	} else {
5605 		dif = 0;
5606 	}
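	/*
	 * Only bother the peer once the window has opened by at least
	 * rwnd_req bytes (at least an MTU's worth, see the caller); smaller
	 * openings are just remembered in freed_by_sorcv_sincelast below.
	 */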
5607 	if (dif >= rwnd_req) {
5608 		if (hold_rlock) {
5609 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5610 			r_unlocked = 1;
5611 		}
5612 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5613 			/*
5614 			 * One last check before we allow the guy possibly
5615 			 * to get in. There is a race where the guy has not
5616 			 * yet reached the gate. In that case, just bail out.
5617 			 */
5618 			goto out;
5619 		}
5620 		SCTP_TCB_LOCK(stcb);
5621 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5622 			/* No reports here */
5623 			SCTP_TCB_UNLOCK(stcb);
5624 			goto out;
5625 		}
5626 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5627 		NET_EPOCH_ENTER(et);
5628 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5629 
5630 		sctp_chunk_output(stcb->sctp_ep, stcb,
5631 		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5632 		/* make sure no timer is running */
5633 		NET_EPOCH_EXIT(et);
5634 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
5635 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5636 		SCTP_TCB_UNLOCK(stcb);
5637 	} else {
5638 		/* Update how much we have pending */
5639 		stcb->freed_by_sorcv_sincelast = dif;
5640 	}
5641 out:
5642 	if (so && r_unlocked && hold_rlock) {
5643 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5644 	}
5645 
5646 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5647 no_lock:
5648 	atomic_add_int(&stcb->asoc.refcnt, -1);
5649 	return;
5650 }
5651 
5652 int
5653 sctp_sorecvmsg(struct socket *so,
5654     struct uio *uio,
5655     struct mbuf **mp,
5656     struct sockaddr *from,
5657     int fromlen,
5658     int *msg_flags,
5659     struct sctp_sndrcvinfo *sinfo,
5660     int filling_sinfo)
5661 {
5662 	/*
5663 	 * MSG flags we will look at: MSG_DONTWAIT - non-blocking I/O.
5664 	 * MSG_PEEK - look, don't touch :-D (only valid withOUT an mbuf copy,
5665 	 * i.e. mp == NULL, so uio is the copy method to userland). MSG_WAITALL - ??
5666 	 * On the way out we may send out any combination of:
5667 	 * MSG_NOTIFICATION MSG_EOR
5668 	 *
5669 	 */
5670 	struct sctp_inpcb *inp = NULL;
5671 	ssize_t my_len = 0;
5672 	ssize_t cp_len = 0;
5673 	int error = 0;
5674 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5675 	struct mbuf *m = NULL;
5676 	struct sctp_tcb *stcb = NULL;
5677 	int wakeup_read_socket = 0;
5678 	int freecnt_applied = 0;
5679 	int out_flags = 0, in_flags = 0;
5680 	int block_allowed = 1;
5681 	uint32_t freed_so_far = 0;
5682 	ssize_t copied_so_far = 0;
5683 	int in_eeor_mode = 0;
5684 	int no_rcv_needed = 0;
5685 	uint32_t rwnd_req = 0;
5686 	int hold_sblock = 0;
5687 	int hold_rlock = 0;
5688 	ssize_t slen = 0;
5689 	uint32_t held_length = 0;
5690 	int sockbuf_lock = 0;
5691 
5692 	if (uio == NULL) {
5693 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5694 		return (EINVAL);
5695 	}
5696 
5697 	if (msg_flags) {
5698 		in_flags = *msg_flags;
5699 		if (in_flags & MSG_PEEK)
5700 			SCTP_STAT_INCR(sctps_read_peeks);
5701 	} else {
5702 		in_flags = 0;
5703 	}
5704 	slen = uio->uio_resid;
5705 
5706 	/* Pull in and set up our int flags */
5707 	if (in_flags & MSG_OOB) {
5708 		/* Out-of-band data is NOT supported */
5709 		return (EOPNOTSUPP);
5710 	}
5711 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5712 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5713 		return (EINVAL);
5714 	}
5715 	if ((in_flags & (MSG_DONTWAIT
5716 	    | MSG_NBIO
5717 	    )) ||
5718 	    SCTP_SO_IS_NBIO(so)) {
5719 		block_allowed = 0;
5720 	}
5721 	/* setup the endpoint */
5722 	inp = (struct sctp_inpcb *)so->so_pcb;
5723 	if (inp == NULL) {
5724 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5725 		return (EFAULT);
5726 	}
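	/*
	 * rwnd_req is how much receive buffer space the user must free up
	 * before we consider sending a window-update SACK via
	 * sctp_user_rcvd(): a fraction of the socket's receive buffer
	 * limit, but never less than SCTP_MIN_RWND.
	 */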
5727 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5728 	/* Must be at least an MTU's worth */
5729 	if (rwnd_req < SCTP_MIN_RWND)
5730 		rwnd_req = SCTP_MIN_RWND;
5731 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5732 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5733 		sctp_misc_ints(SCTP_SORECV_ENTER,
5734 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5735 	}
5736 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5737 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5738 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5739 	}
5740 
5741 
5742 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5743 	if (error) {
5744 		goto release_unlocked;
5745 	}
5746 	sockbuf_lock = 1;
5747 restart:
5748 
5749 
5750 restart_nosblocks:
5751 	if (hold_sblock == 0) {
5752 		SOCKBUF_LOCK(&so->so_rcv);
5753 		hold_sblock = 1;
5754 	}
5755 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5756 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5757 		goto out;
5758 	}
5759 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5760 		if (so->so_error) {
5761 			error = so->so_error;
5762 			if ((in_flags & MSG_PEEK) == 0)
5763 				so->so_error = 0;
5764 			goto out;
5765 		} else {
5766 			if (so->so_rcv.sb_cc == 0) {
5767 				/* indicate EOF */
5768 				error = 0;
5769 				goto out;
5770 			}
5771 		}
5772 	}
5773 	if (so->so_rcv.sb_cc <= held_length) {
5774 		if (so->so_error) {
5775 			error = so->so_error;
5776 			if ((in_flags & MSG_PEEK) == 0) {
5777 				so->so_error = 0;
5778 			}
5779 			goto out;
5780 		}
5781 		if ((so->so_rcv.sb_cc == 0) &&
5782 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5783 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5784 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5785 				/*
5786 				 * For the active open side clear flags for
5787 				 * re-use; passive open is blocked by
5788 				 * connect.
5789 				 */
5790 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5791 					/*
5792 					 * You were aborted, passive side
5793 					 * always hits here
5794 					 */
5795 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5796 					error = ECONNRESET;
5797 				}
5798 				so->so_state &= ~(SS_ISCONNECTING |
5799 				    SS_ISDISCONNECTING |
5800 				    SS_ISCONFIRMING |
5801 				    SS_ISCONNECTED);
5802 				if (error == 0) {
5803 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5804 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5805 						error = ENOTCONN;
5806 					}
5807 				}
5808 				goto out;
5809 			}
5810 		}
5811 		if (block_allowed) {
5812 			error = sbwait(&so->so_rcv);
5813 			if (error) {
5814 				goto out;
5815 			}
5816 			held_length = 0;
5817 			goto restart_nosblocks;
5818 		} else {
5819 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5820 			error = EWOULDBLOCK;
5821 			goto out;
5822 		}
5823 	}
5824 	if (hold_sblock == 1) {
5825 		SOCKBUF_UNLOCK(&so->so_rcv);
5826 		hold_sblock = 0;
5827 	}
5828 	/* we possibly have data we can read */
5829 	/* sa_ignore FREED_MEMORY */
5830 	control = TAILQ_FIRST(&inp->read_queue);
5831 	if (control == NULL) {
5832 		/*
5833 		 * This could be happening since the appender did the
5834 		 * increment but has not yet done the tailq insert onto the
5835 		 * read_queue.
5836 		 */
5837 		if (hold_rlock == 0) {
5838 			SCTP_INP_READ_LOCK(inp);
5839 		}
5840 		control = TAILQ_FIRST(&inp->read_queue);
5841 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5842 #ifdef INVARIANTS
5843 			panic("Huh, its non zero and nothing on control?");
5844 #endif
5845 			so->so_rcv.sb_cc = 0;
5846 		}
5847 		SCTP_INP_READ_UNLOCK(inp);
5848 		hold_rlock = 0;
5849 		goto restart;
5850 	}
5851 
5852 	if ((control->length == 0) &&
5853 	    (control->do_not_ref_stcb)) {
5854 		/*
5855 		 * Clean up code for freeing assoc that left behind a
5856 		 * pdapi.. maybe a peer in EEOR that just closed after
5857 		 * sending and never indicated an EOR.
5858 		 */
5859 		if (hold_rlock == 0) {
5860 			hold_rlock = 1;
5861 			SCTP_INP_READ_LOCK(inp);
5862 		}
5863 		control->held_length = 0;
5864 		if (control->data) {
5865 			/* Hmm there is data here .. fix */
5866 			struct mbuf *m_tmp;
5867 			int cnt = 0;
5868 
5869 			m_tmp = control->data;
5870 			while (m_tmp) {
5871 				cnt += SCTP_BUF_LEN(m_tmp);
5872 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5873 					control->tail_mbuf = m_tmp;
5874 					control->end_added = 1;
5875 				}
5876 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5877 			}
5878 			control->length = cnt;
5879 		} else {
5880 			/* remove it */
5881 			TAILQ_REMOVE(&inp->read_queue, control, next);
5882 			/* Add back any hidden data */
5883 			sctp_free_remote_addr(control->whoFrom);
5884 			sctp_free_a_readq(stcb, control);
5885 		}
5886 		if (hold_rlock) {
5887 			hold_rlock = 0;
5888 			SCTP_INP_READ_UNLOCK(inp);
5889 		}
5890 		goto restart;
5891 	}
5892 	if ((control->length == 0) &&
5893 	    (control->end_added == 1)) {
5894 		/*
5895 		 * Do we also need to check for (control->pdapi_aborted ==
5896 		 * 1)?
5897 		 */
5898 		if (hold_rlock == 0) {
5899 			hold_rlock = 1;
5900 			SCTP_INP_READ_LOCK(inp);
5901 		}
5902 		TAILQ_REMOVE(&inp->read_queue, control, next);
5903 		if (control->data) {
5904 #ifdef INVARIANTS
5905 			panic("control->data not null but control->length == 0");
5906 #else
5907 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5908 			sctp_m_freem(control->data);
5909 			control->data = NULL;
5910 #endif
5911 		}
5912 		if (control->aux_data) {
5913 			sctp_m_free(control->aux_data);
5914 			control->aux_data = NULL;
5915 		}
5916 #ifdef INVARIANTS
5917 		if (control->on_strm_q) {
5918 			panic("About to free ctl:%p so:%p and its in %d",
5919 			    control, so, control->on_strm_q);
5920 		}
5921 #endif
5922 		sctp_free_remote_addr(control->whoFrom);
5923 		sctp_free_a_readq(stcb, control);
5924 		if (hold_rlock) {
5925 			hold_rlock = 0;
5926 			SCTP_INP_READ_UNLOCK(inp);
5927 		}
5928 		goto restart;
5929 	}
5930 	if (control->length == 0) {
5931 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5932 		    (filling_sinfo)) {
5933 			/* find a more suitable one than this */
5934 			ctl = TAILQ_NEXT(control, next);
5935 			while (ctl) {
5936 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5937 				    (ctl->some_taken ||
5938 				    (ctl->spec_flags & M_NOTIFICATION) ||
5939 				    ((ctl->do_not_ref_stcb == 0) &&
5940 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5941 				    ) {
5942 					/*-
5943 					 * If we have a different TCB next, and there is data
5944 					 * present: if we have already taken some (pdapi), OR we can
5945 					 * ref the tcb and no delivery has started on this stream, we
5946 					 * take it. Note we allow a notification on a different
5947 					 * assoc to be delivered.
5948 					 */
5949 					control = ctl;
5950 					goto found_one;
5951 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5952 					    (ctl->length) &&
5953 					    ((ctl->some_taken) ||
5954 					    ((ctl->do_not_ref_stcb == 0) &&
5955 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5956 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5957 					/*-
5958 					 * If we have the same tcb, and there is data present, and we
5959 					 * have the strm interleave feature present: then if we have
5960 					 * taken some (pdapi) or we can refer to that tcb AND we have
5961 					 * not started a delivery for this stream, we can take it.
5962 					 * Note we do NOT allow a notification on the same assoc to
5963 					 * be delivered.
5964 					 */
5965 					control = ctl;
5966 					goto found_one;
5967 				}
5968 				ctl = TAILQ_NEXT(ctl, next);
5969 			}
5970 		}
5971 		/*
5972 		 * if we reach here, no suitable replacement is available
5973 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5974 		 * into our held count, and it's time to sleep again.
5975 		 */
5976 		held_length = so->so_rcv.sb_cc;
5977 		control->held_length = so->so_rcv.sb_cc;
5978 		goto restart;
5979 	}
5980 	/* Clear the held length since there is something to read */
5981 	control->held_length = 0;
5982 found_one:
5983 	/*
5984 	 * If we reach here, control has some data for us to read off.
5985 	 * Note that stcb COULD be NULL.
5986 	 */
5987 	if (hold_rlock == 0) {
5988 		hold_rlock = 1;
5989 		SCTP_INP_READ_LOCK(inp);
5990 	}
5991 	control->some_taken++;
5992 	stcb = control->stcb;
5993 	if (stcb) {
5994 		if ((control->do_not_ref_stcb == 0) &&
5995 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5996 			if (freecnt_applied == 0)
5997 				stcb = NULL;
5998 		} else if (control->do_not_ref_stcb == 0) {
5999 			/* you can't free it on me please */
6000 			/*
6001 			 * The lock on the socket buffer protects us so the
6002 			 * free code will stop. But since we used the
6003 			 * socketbuf lock and the sender uses the tcb_lock
6004 			 * to increment, we need to use the atomic add to
6005 			 * the refcnt
6006 			 */
6007 			if (freecnt_applied) {
6008 #ifdef INVARIANTS
6009 				panic("refcnt already incremented");
6010 #else
6011 				SCTP_PRINTF("refcnt already incremented?\n");
6012 #endif
6013 			} else {
6014 				atomic_add_int(&stcb->asoc.refcnt, 1);
6015 				freecnt_applied = 1;
6016 			}
6017 			/*
6018 			 * Setup to remember how much we have not yet told
6019 			 * the peer our rwnd has opened up. Note we grab the
6020 			 * value from the tcb from last time. Note too that
6021 			 * sack sending clears this when a sack is sent,
6022 			 * which is fine. Once we hit the rwnd_req, we then
6023 			 * will go to the sctp_user_rcvd() that will not
6024 			 * lock until it KNOWs it MUST send a WUP-SACK.
6025 			 */
6026 			freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast;
6027 			stcb->freed_by_sorcv_sincelast = 0;
6028 		}
6029 	}
6030 	if (stcb &&
6031 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
6032 	    control->do_not_ref_stcb == 0) {
6033 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
6034 	}
6035 
6036 	/* First lets get off the sinfo and sockaddr info */
6037 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
6038 		sinfo->sinfo_stream = control->sinfo_stream;
6039 		sinfo->sinfo_ssn = (uint16_t)control->mid;
6040 		sinfo->sinfo_flags = control->sinfo_flags;
6041 		sinfo->sinfo_ppid = control->sinfo_ppid;
6042 		sinfo->sinfo_context = control->sinfo_context;
6043 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
6044 		sinfo->sinfo_tsn = control->sinfo_tsn;
6045 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
6046 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
6047 		nxt = TAILQ_NEXT(control, next);
6048 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6049 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
6050 			struct sctp_extrcvinfo *s_extra;
6051 
6052 			s_extra = (struct sctp_extrcvinfo *)sinfo;
6053 			if ((nxt) &&
6054 			    (nxt->length)) {
6055 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
6056 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
6057 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
6058 				}
6059 				if (nxt->spec_flags & M_NOTIFICATION) {
6060 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
6061 				}
6062 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
6063 				s_extra->serinfo_next_length = nxt->length;
6064 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
6065 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
6066 				if (nxt->tail_mbuf != NULL) {
6067 					if (nxt->end_added) {
6068 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
6069 					}
6070 				}
6071 			} else {
6072 				/*
6073 				 * we explicitly 0 this, since the memcpy
6074 				 * got some other things beyond the older
6075 				 * sinfo_ that is on the control's structure
6076 				 * :-D
6077 				 */
6078 				nxt = NULL;
6079 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6080 				s_extra->serinfo_next_aid = 0;
6081 				s_extra->serinfo_next_length = 0;
6082 				s_extra->serinfo_next_ppid = 0;
6083 				s_extra->serinfo_next_stream = 0;
6084 			}
6085 		}
6086 		/*
6087 		 * update off the real current cum-ack, if we have an stcb.
6088 		 */
6089 		if ((control->do_not_ref_stcb == 0) && stcb)
6090 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
6091 		/*
6092 		 * mask off the high bits, we keep the actual chunk bits in
6093 		 * there.
6094 		 */
6095 		sinfo->sinfo_flags &= 0x00ff;
6096 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
6097 			sinfo->sinfo_flags |= SCTP_UNORDERED;
6098 		}
6099 	}
6100 #ifdef SCTP_ASOCLOG_OF_TSNS
6101 	{
6102 		int index, newindex;
6103 		struct sctp_pcbtsn_rlog *entry;
6104 
6105 		do {
6106 			index = inp->readlog_index;
6107 			newindex = index + 1;
6108 			if (newindex >= SCTP_READ_LOG_SIZE) {
6109 				newindex = 0;
6110 			}
6111 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
6112 		entry = &inp->readlog[index];
6113 		entry->vtag = control->sinfo_assoc_id;
6114 		entry->strm = control->sinfo_stream;
6115 		entry->seq = (uint16_t)control->mid;
6116 		entry->sz = control->length;
6117 		entry->flgs = control->sinfo_flags;
6118 	}
6119 #endif
6120 	if ((fromlen > 0) && (from != NULL)) {
6121 		union sctp_sockstore store;
6122 		size_t len;
6123 
6124 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
6125 #ifdef INET6
6126 		case AF_INET6:
6127 			len = sizeof(struct sockaddr_in6);
6128 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
6129 			store.sin6.sin6_port = control->port_from;
6130 			break;
6131 #endif
6132 #ifdef INET
6133 		case AF_INET:
6134 #ifdef INET6
6135 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
6136 				len = sizeof(struct sockaddr_in6);
6137 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
6138 				    &store.sin6);
6139 				store.sin6.sin6_port = control->port_from;
6140 			} else {
6141 				len = sizeof(struct sockaddr_in);
6142 				store.sin = control->whoFrom->ro._l_addr.sin;
6143 				store.sin.sin_port = control->port_from;
6144 			}
6145 #else
6146 			len = sizeof(struct sockaddr_in);
6147 			store.sin = control->whoFrom->ro._l_addr.sin;
6148 			store.sin.sin_port = control->port_from;
6149 #endif
6150 			break;
6151 #endif
6152 		default:
6153 			len = 0;
6154 			break;
6155 		}
6156 		memcpy(from, &store, min((size_t)fromlen, len));
6157 #ifdef INET6
6158 		{
6159 			struct sockaddr_in6 lsa6, *from6;
6160 
6161 			from6 = (struct sockaddr_in6 *)from;
6162 			sctp_recover_scope_mac(from6, (&lsa6));
6163 		}
6164 #endif
6165 	}
6166 	if (hold_rlock) {
6167 		SCTP_INP_READ_UNLOCK(inp);
6168 		hold_rlock = 0;
6169 	}
6170 	if (hold_sblock) {
6171 		SOCKBUF_UNLOCK(&so->so_rcv);
6172 		hold_sblock = 0;
6173 	}
6174 	/* now copy out what data we can */
6175 	if (mp == NULL) {
6176 		/* copy out each mbuf in the chain up to length */
6177 get_more_data:
6178 		m = control->data;
6179 		while (m) {
6180 			/* Move out all we can */
6181 			cp_len = uio->uio_resid;
6182 			my_len = SCTP_BUF_LEN(m);
6183 			if (cp_len > my_len) {
6184 				/* not enough in this buf */
6185 				cp_len = my_len;
6186 			}
6187 			if (hold_rlock) {
6188 				SCTP_INP_READ_UNLOCK(inp);
6189 				hold_rlock = 0;
6190 			}
6191 			if (cp_len > 0)
6192 				error = uiomove(mtod(m, char *), (int)cp_len, uio);
6193 			/* re-read */
6194 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
6195 				goto release;
6196 			}
6197 
6198 			if ((control->do_not_ref_stcb == 0) && stcb &&
6199 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
6200 				no_rcv_needed = 1;
6201 			}
6202 			if (error) {
6203 				/* error we are out of here */
6204 				goto release;
6205 			}
6206 			SCTP_INP_READ_LOCK(inp);
6207 			hold_rlock = 1;
6208 			if (cp_len == SCTP_BUF_LEN(m)) {
6209 				if ((SCTP_BUF_NEXT(m) == NULL) &&
6210 				    (control->end_added)) {
6211 					out_flags |= MSG_EOR;
6212 					if ((control->do_not_ref_stcb == 0) &&
6213 					    (control->stcb != NULL) &&
6214 					    ((control->spec_flags & M_NOTIFICATION) == 0))
6215 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6216 				}
6217 				if (control->spec_flags & M_NOTIFICATION) {
6218 					out_flags |= MSG_NOTIFICATION;
6219 				}
6220 				/* we ate up the mbuf */
6221 				if (in_flags & MSG_PEEK) {
6222 					/* just looking */
6223 					m = SCTP_BUF_NEXT(m);
6224 					copied_so_far += cp_len;
6225 				} else {
6226 					/* dispose of the mbuf */
6227 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6228 						sctp_sblog(&so->so_rcv,
6229 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6230 					}
6231 					sctp_sbfree(control, stcb, &so->so_rcv, m);
6232 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6233 						sctp_sblog(&so->so_rcv,
6234 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6235 					}
6236 					copied_so_far += cp_len;
6237 					freed_so_far += (uint32_t)cp_len;
6238 					freed_so_far += MSIZE;
6239 					atomic_subtract_int(&control->length, cp_len);
6240 					control->data = sctp_m_free(m);
6241 					m = control->data;
6242 					/*
6243 					 * been through it all; must hold sb
6244 					 * lock, ok to null tail
6245 					 */
6246 					if (control->data == NULL) {
6247 #ifdef INVARIANTS
6248 						if ((control->end_added == 0) ||
6249 						    (TAILQ_NEXT(control, next) == NULL)) {
6250 							/*
6251 							 * If the end is not
6252 							 * added, OR the
6253 							 * next is NOT null
6254 							 * we MUST have the
6255 							 * lock.
6256 							 */
6257 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
6258 								panic("Hmm we don't own the lock?");
6259 							}
6260 						}
6261 #endif
6262 						control->tail_mbuf = NULL;
6263 #ifdef INVARIANTS
6264 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
6265 							panic("end_added, nothing left and no MSG_EOR");
6266 						}
6267 #endif
6268 					}
6269 				}
6270 			} else {
6271 				/* Do we need to trim the mbuf? */
6272 				if (control->spec_flags & M_NOTIFICATION) {
6273 					out_flags |= MSG_NOTIFICATION;
6274 				}
6275 				if ((in_flags & MSG_PEEK) == 0) {
6276 					SCTP_BUF_RESV_UF(m, cp_len);
6277 					SCTP_BUF_LEN(m) -= (int)cp_len;
6278 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6279 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len);
6280 					}
6281 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
6282 					if ((control->do_not_ref_stcb == 0) &&
6283 					    stcb) {
6284 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
6285 					}
6286 					copied_so_far += cp_len;
6287 					freed_so_far += (uint32_t)cp_len;
6288 					freed_so_far += MSIZE;
6289 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6290 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
6291 						    SCTP_LOG_SBRESULT, 0);
6292 					}
6293 					atomic_subtract_int(&control->length, cp_len);
6294 				} else {
6295 					copied_so_far += cp_len;
6296 				}
6297 			}
6298 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
6299 				break;
6300 			}
6301 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6302 			    (control->do_not_ref_stcb == 0) &&
6303 			    (freed_so_far >= rwnd_req)) {
6304 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6305 			}
6306 		}		/* end while(m) */
6307 		/*
6308 		 * At this point we have looked at it all and we either have
6309 		 * a MSG_EOR, or have read all the user wants... <OR>
6310 		 * control->length == 0.
6311 		 */
6312 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
6313 			/* we are done with this control */
6314 			if (control->length == 0) {
6315 				if (control->data) {
6316 #ifdef INVARIANTS
6317 					panic("control->data not null at read eor?");
6318 #else
6319 					SCTP_PRINTF("Strange, data left in the control buffer .. invariants would panic?\n");
6320 					sctp_m_freem(control->data);
6321 					control->data = NULL;
6322 #endif
6323 				}
6324 		done_with_control:
6325 				if (hold_rlock == 0) {
6326 					SCTP_INP_READ_LOCK(inp);
6327 					hold_rlock = 1;
6328 				}
6329 				TAILQ_REMOVE(&inp->read_queue, control, next);
6330 				/* Add back any hidden data */
6331 				if (control->held_length) {
6332 					held_length = 0;
6333 					control->held_length = 0;
6334 					wakeup_read_socket = 1;
6335 				}
6336 				if (control->aux_data) {
6337 					sctp_m_free(control->aux_data);
6338 					control->aux_data = NULL;
6339 				}
6340 				no_rcv_needed = control->do_not_ref_stcb;
6341 				sctp_free_remote_addr(control->whoFrom);
6342 				control->data = NULL;
6343 #ifdef INVARIANTS
6344 				if (control->on_strm_q) {
6345 					panic("About to free ctl:%p so:%p and its in %d",
6346 					    control, so, control->on_strm_q);
6347 				}
6348 #endif
6349 				sctp_free_a_readq(stcb, control);
6350 				control = NULL;
6351 				if ((freed_so_far >= rwnd_req) &&
6352 				    (no_rcv_needed == 0))
6353 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6354 
6355 			} else {
6356 				/*
6357 				 * The user did not read all of this
6358 				 * message, turn off the returned MSG_EOR
6359 				 * since we are leaving more behind on the
6360 				 * control to read.
6361 				 */
6362 #ifdef INVARIANTS
6363 				if (control->end_added &&
6364 				    (control->data == NULL) &&
6365 				    (control->tail_mbuf == NULL)) {
6366 					panic("Gak, control->length is corrupt?");
6367 				}
6368 #endif
6369 				no_rcv_needed = control->do_not_ref_stcb;
6370 				out_flags &= ~MSG_EOR;
6371 			}
6372 		}
6373 		if (out_flags & MSG_EOR) {
6374 			goto release;
6375 		}
6376 		if ((uio->uio_resid == 0) ||
6377 		    ((in_eeor_mode) &&
6378 		    (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) {
6379 			goto release;
6380 		}
6381 		/*
6382 		 * If I hit here the receiver wants more and this message is
6383 		 * NOT done (pd-api). So two questions: can we block? If not,
6384 		 * we are done. Did the user NOT set MSG_WAITALL?
6385 		 */
6386 		if (block_allowed == 0) {
6387 			goto release;
6388 		}
6389 		/*
6390 		 * We need to wait for more data. A few things: - We don't
6391 		 * sbunlock() so we don't get someone else reading. - We
6392 		 * must be sure to account for the case where what is added
6393 		 * is NOT to our control when we wake up.
6394 		 */
6395 
6396 		/*
6397 		 * Do we need to tell the transport a rwnd update might be
6398 		 * needed before we go to sleep?
6399 		 */
6400 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6401 		    ((freed_so_far >= rwnd_req) &&
6402 		    (control->do_not_ref_stcb == 0) &&
6403 		    (no_rcv_needed == 0))) {
6404 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6405 		}
6406 wait_some_more:
6407 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
6408 			goto release;
6409 		}
6410 
6411 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
6412 			goto release;
6413 
6414 		if (hold_rlock == 1) {
6415 			SCTP_INP_READ_UNLOCK(inp);
6416 			hold_rlock = 0;
6417 		}
6418 		if (hold_sblock == 0) {
6419 			SOCKBUF_LOCK(&so->so_rcv);
6420 			hold_sblock = 1;
6421 		}
6422 		if ((copied_so_far) && (control->length == 0) &&
6423 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
6424 			goto release;
6425 		}
6426 		if (so->so_rcv.sb_cc <= control->held_length) {
6427 			error = sbwait(&so->so_rcv);
6428 			if (error) {
6429 				goto release;
6430 			}
6431 			control->held_length = 0;
6432 		}
6433 		if (hold_sblock) {
6434 			SOCKBUF_UNLOCK(&so->so_rcv);
6435 			hold_sblock = 0;
6436 		}
6437 		if (control->length == 0) {
6438 			/* still nothing here */
6439 			if (control->end_added == 1) {
6440 				/* he aborted, or is done, i.e. did a shutdown */
6441 				out_flags |= MSG_EOR;
6442 				if (control->pdapi_aborted) {
6443 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6444 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6445 
6446 					out_flags |= MSG_TRUNC;
6447 				} else {
6448 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6449 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6450 				}
6451 				goto done_with_control;
6452 			}
6453 			if (so->so_rcv.sb_cc > held_length) {
6454 				control->held_length = so->so_rcv.sb_cc;
6455 				held_length = 0;
6456 			}
6457 			goto wait_some_more;
6458 		} else if (control->data == NULL) {
6459 			/*
6460 			 * we must re-sync since data is probably being
6461 			 * added
6462 			 */
6463 			SCTP_INP_READ_LOCK(inp);
6464 			if ((control->length > 0) && (control->data == NULL)) {
6465 				/*
6466 				 * big trouble.. we have the lock and it's
6467 				 * corrupt?
6468 				 */
6469 #ifdef INVARIANTS
6470 				panic("Impossible data==NULL length !=0");
6471 #endif
6472 				out_flags |= MSG_EOR;
6473 				out_flags |= MSG_TRUNC;
6474 				control->length = 0;
6475 				SCTP_INP_READ_UNLOCK(inp);
6476 				goto done_with_control;
6477 			}
6478 			SCTP_INP_READ_UNLOCK(inp);
6479 			/* We will fall around to get more data */
6480 		}
6481 		goto get_more_data;
6482 	} else {
6483 		/*-
6484 		 * Give caller back the mbuf chain,
6485 		 * store in uio_resid the length
6486 		 */
6487 		wakeup_read_socket = 0;
6488 		if ((control->end_added == 0) ||
6489 		    (TAILQ_NEXT(control, next) == NULL)) {
6490 			/* Need to get rlock */
6491 			if (hold_rlock == 0) {
6492 				SCTP_INP_READ_LOCK(inp);
6493 				hold_rlock = 1;
6494 			}
6495 		}
6496 		if (control->end_added) {
6497 			out_flags |= MSG_EOR;
6498 			if ((control->do_not_ref_stcb == 0) &&
6499 			    (control->stcb != NULL) &&
6500 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6501 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6502 		}
6503 		if (control->spec_flags & M_NOTIFICATION) {
6504 			out_flags |= MSG_NOTIFICATION;
6505 		}
6506 		uio->uio_resid = control->length;
6507 		*mp = control->data;
6508 		m = control->data;
6509 		while (m) {
6510 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6511 				sctp_sblog(&so->so_rcv,
6512 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6513 			}
6514 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6515 			freed_so_far += (uint32_t)SCTP_BUF_LEN(m);
6516 			freed_so_far += MSIZE;
6517 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6518 				sctp_sblog(&so->so_rcv,
6519 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6520 			}
6521 			m = SCTP_BUF_NEXT(m);
6522 		}
6523 		control->data = control->tail_mbuf = NULL;
6524 		control->length = 0;
6525 		if (out_flags & MSG_EOR) {
6526 			/* Done with this control */
6527 			goto done_with_control;
6528 		}
6529 	}
6530 release:
6531 	if (hold_rlock == 1) {
6532 		SCTP_INP_READ_UNLOCK(inp);
6533 		hold_rlock = 0;
6534 	}
6535 	if (hold_sblock == 1) {
6536 		SOCKBUF_UNLOCK(&so->so_rcv);
6537 		hold_sblock = 0;
6538 	}
6539 
6540 	sbunlock(&so->so_rcv);
6541 	sockbuf_lock = 0;
6542 
6543 release_unlocked:
6544 	if (hold_sblock) {
6545 		SOCKBUF_UNLOCK(&so->so_rcv);
6546 		hold_sblock = 0;
6547 	}
6548 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6549 		if ((freed_so_far >= rwnd_req) &&
6550 		    (control && (control->do_not_ref_stcb == 0)) &&
6551 		    (no_rcv_needed == 0))
6552 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6553 	}
6554 out:
6555 	if (msg_flags) {
6556 		*msg_flags = out_flags;
6557 	}
6558 	if (((out_flags & MSG_EOR) == 0) &&
6559 	    ((in_flags & MSG_PEEK) == 0) &&
6560 	    (sinfo) &&
6561 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6562 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6563 		struct sctp_extrcvinfo *s_extra;
6564 
6565 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6566 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6567 	}
6568 	if (hold_rlock == 1) {
6569 		SCTP_INP_READ_UNLOCK(inp);
6570 	}
6571 	if (hold_sblock) {
6572 		SOCKBUF_UNLOCK(&so->so_rcv);
6573 	}
6574 	if (sockbuf_lock) {
6575 		sbunlock(&so->so_rcv);
6576 	}
6577 
6578 	if (freecnt_applied) {
6579 		/*
6580 		 * The lock on the socket buffer protects us so the free
6581 		 * code will stop. But since we used the socketbuf lock and
6582 		 * the sender uses the tcb_lock to increment, we need to use
6583 		 * the atomic add to the refcnt.
6584 		 */
6585 		if (stcb == NULL) {
6586 #ifdef INVARIANTS
6587 			panic("stcb for refcnt has gone NULL?");
6588 			goto stage_left;
6589 #else
6590 			goto stage_left;
6591 #endif
6592 		}
6593 		/* Save the value back for next time */
6594 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6595 		atomic_add_int(&stcb->asoc.refcnt, -1);
6596 	}
6597 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6598 		if (stcb) {
6599 			sctp_misc_ints(SCTP_SORECV_DONE,
6600 			    freed_so_far,
6601 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6602 			    stcb->asoc.my_rwnd,
6603 			    so->so_rcv.sb_cc);
6604 		} else {
6605 			sctp_misc_ints(SCTP_SORECV_DONE,
6606 			    freed_so_far,
6607 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6608 			    0,
6609 			    so->so_rcv.sb_cc);
6610 		}
6611 	}
6612 stage_left:
6613 	if (wakeup_read_socket) {
6614 		sctp_sorwakeup(inp, so);
6615 	}
6616 	return (error);
6617 }
6618 
6619 
6620 #ifdef SCTP_MBUF_LOGGING
6621 struct mbuf *
6622 sctp_m_free(struct mbuf *m)
6623 {
6624 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6625 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6626 	}
6627 	return (m_free(m));
6628 }
6629 
6630 void
6631 sctp_m_freem(struct mbuf *mb)
6632 {
6633 	while (mb != NULL)
6634 		mb = sctp_m_free(mb);
6635 }
6636 
6637 #endif
6638 
6639 int
6640 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6641 {
6642 	/*
6643 	 * Given a local address, for all associations that hold the
6644 	 * address, request a peer-set-primary.
6645 	 */
6646 	struct sctp_ifa *ifa;
6647 	struct sctp_laddr *wi;
6648 
6649 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6650 	if (ifa == NULL) {
6651 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6652 		return (EADDRNOTAVAIL);
6653 	}
6654 	/*
6655 	 * Now that we have the ifa we must awaken the iterator with this
6656 	 * message.
6657 	 */
6658 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6659 	if (wi == NULL) {
6660 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6661 		return (ENOMEM);
6662 	}
6663 	/* Now incr the count and init the wi structure */
6664 	SCTP_INCR_LADDR_COUNT();
6665 	memset(wi, 0, sizeof(*wi));
6666 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6667 	wi->ifa = ifa;
6668 	wi->action = SCTP_SET_PRIM_ADDR;
6669 	atomic_add_int(&ifa->refcount, 1);
6670 
6671 	/* Now add it to the work queue */
6672 	SCTP_WQ_ADDR_LOCK();
6673 	/*
6674 	 * Should this really be a tailq? As it is we will process the
6675 	 * newest first :-0
6676 	 */
6677 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6678 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6679 	    (struct sctp_inpcb *)NULL,
6680 	    (struct sctp_tcb *)NULL,
6681 	    (struct sctp_nets *)NULL);
6682 	SCTP_WQ_ADDR_UNLOCK();
6683 	return (0);
6684 }
6685 
6686 
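/*
 * SCTP's soreceive() implementation: a thin wrapper around sctp_sorecvmsg()
 * that supplies scratch space for the source address and the extended
 * receive info, and converts the latter into a control message chain for
 * the caller when requested.
 */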
6687 int
6688 sctp_soreceive(struct socket *so,
6689     struct sockaddr **psa,
6690     struct uio *uio,
6691     struct mbuf **mp0,
6692     struct mbuf **controlp,
6693     int *flagsp)
6694 {
6695 	int error, fromlen;
6696 	uint8_t sockbuf[256];
6697 	struct sockaddr *from;
6698 	struct sctp_extrcvinfo sinfo;
6699 	int filling_sinfo = 1;
6700 	int flags;
6701 	struct sctp_inpcb *inp;
6702 
6703 	inp = (struct sctp_inpcb *)so->so_pcb;
6704 	/* pick up the assoc we are reading from */
6705 	if (inp == NULL) {
6706 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6707 		return (EINVAL);
6708 	}
6709 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6710 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6711 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6712 	    (controlp == NULL)) {
6713 		/* user does not want the sndrcv ctl */
6714 		filling_sinfo = 0;
6715 	}
6716 	if (psa) {
6717 		from = (struct sockaddr *)sockbuf;
6718 		fromlen = sizeof(sockbuf);
6719 		from->sa_len = 0;
6720 	} else {
6721 		from = NULL;
6722 		fromlen = 0;
6723 	}
6724 
6725 	if (filling_sinfo) {
6726 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6727 	}
6728 	if (flagsp != NULL) {
6729 		flags = *flagsp;
6730 	} else {
6731 		flags = 0;
6732 	}
6733 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags,
6734 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6735 	if (flagsp != NULL) {
6736 		*flagsp = flags;
6737 	}
6738 	if (controlp != NULL) {
6739 		/* copy back the sinfo in a CMSG format */
6740 		if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) {
6741 			*controlp = sctp_build_ctl_nchunk(inp,
6742 			    (struct sctp_sndrcvinfo *)&sinfo);
6743 		} else {
6744 			*controlp = NULL;
6745 		}
6746 	}
6747 	if (psa) {
6748 		/* copy back the address info */
6749 		if (from && from->sa_len) {
6750 			*psa = sodupsockaddr(from, M_NOWAIT);
6751 		} else {
6752 			*psa = NULL;
6753 		}
6754 	}
6755 	return (error);
6756 }
6757 
6758 
6759 
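/*
 * The connectx() helpers below consume a packed array of sockaddr
 * structures laid out back to back.  Hypothetical caller sketch (not from
 * this file), packing two IPv4 peers:
 *
 *	struct sockaddr_in addrs[2];
 *
 *	memset(addrs, 0, sizeof(addrs));
 *	addrs[0].sin_len = addrs[1].sin_len = sizeof(struct sockaddr_in);
 *	addrs[0].sin_family = addrs[1].sin_family = AF_INET;
 *	(fill in sin_addr and sin_port for each entry)
 *
 * Each helper walks the buffer by sa_family, advancing by
 * sizeof(struct sockaddr_in) or sizeof(struct sockaddr_in6) per entry.
 */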
6760 
6761 
6762 int
6763 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6764     int totaddr, int *error)
6765 {
6766 	int added = 0;
6767 	int i;
6768 	struct sctp_inpcb *inp;
6769 	struct sockaddr *sa;
6770 	size_t incr = 0;
6771 #ifdef INET
6772 	struct sockaddr_in *sin;
6773 #endif
6774 #ifdef INET6
6775 	struct sockaddr_in6 *sin6;
6776 #endif
6777 
6778 	sa = addr;
6779 	inp = stcb->sctp_ep;
6780 	*error = 0;
6781 	for (i = 0; i < totaddr; i++) {
6782 		switch (sa->sa_family) {
6783 #ifdef INET
6784 		case AF_INET:
6785 			incr = sizeof(struct sockaddr_in);
6786 			sin = (struct sockaddr_in *)sa;
6787 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6788 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6789 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6790 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6791 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6792 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
6793 				*error = EINVAL;
6794 				goto out_now;
6795 			}
6796 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6797 			    SCTP_DONOT_SETSCOPE,
6798 			    SCTP_ADDR_IS_CONFIRMED)) {
6799 				/* assoc gone no un-lock */
6800 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6801 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6802 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
6803 				*error = ENOBUFS;
6804 				goto out_now;
6805 			}
6806 			added++;
6807 			break;
6808 #endif
6809 #ifdef INET6
6810 		case AF_INET6:
6811 			incr = sizeof(struct sockaddr_in6);
6812 			sin6 = (struct sockaddr_in6 *)sa;
6813 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6814 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6815 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6816 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6817 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
6818 				*error = EINVAL;
6819 				goto out_now;
6820 			}
6821 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6822 			    SCTP_DONOT_SETSCOPE,
6823 			    SCTP_ADDR_IS_CONFIRMED)) {
6824 				/* association is gone, no unlock needed */
6825 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6826 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6827 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
6828 				*error = ENOBUFS;
6829 				goto out_now;
6830 			}
6831 			added++;
6832 			break;
6833 #endif
6834 		default:
6835 			break;
6836 		}
6837 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6838 	}
6839 out_now:
6840 	return (added);
6841 }
6842 
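/*
 * Helper for sctp_connectx(): validate a packed list of totaddr sockaddrs
 * occupying at most limit bytes, counting the IPv4 and IPv6 entries in
 * *num_v4 and *num_v6.  Returns EINVAL for a malformed list, EALREADY if an
 * association to one of the addresses already exists, and 0 otherwise.
 */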
6843 int
6844 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6845     unsigned int totaddr,
6846     unsigned int *num_v4, unsigned int *num_v6,
6847     unsigned int limit)
6848 {
6849 	struct sockaddr *sa;
6850 	struct sctp_tcb *stcb;
6851 	unsigned int incr, at, i;
6852 
6853 	at = 0;
6854 	sa = addr;
6855 	*num_v6 = *num_v4 = 0;
6856 	/* account and validate addresses */
6857 	if (totaddr == 0) {
6858 		return (EINVAL);
6859 	}
6860 	for (i = 0; i < totaddr; i++) {
6861 		if (at + sizeof(struct sockaddr) > limit) {
6862 			return (EINVAL);
6863 		}
6864 		switch (sa->sa_family) {
6865 #ifdef INET
6866 		case AF_INET:
6867 			incr = (unsigned int)sizeof(struct sockaddr_in);
6868 			if (sa->sa_len != incr) {
6869 				return (EINVAL);
6870 			}
6871 			(*num_v4) += 1;
6872 			break;
6873 #endif
6874 #ifdef INET6
6875 		case AF_INET6:
6876 			{
6877 				struct sockaddr_in6 *sin6;
6878 
6879 				sin6 = (struct sockaddr_in6 *)sa;
6880 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6881 					/* Must be non-mapped for connectx */
6882 					return (EINVAL);
6883 				}
6884 				incr = (unsigned int)sizeof(struct sockaddr_in6);
6885 				if (sa->sa_len != incr) {
6886 					return (EINVAL);
6887 				}
6888 				(*num_v6) += 1;
6889 				break;
6890 			}
6891 #endif
6892 		default:
6893 			return (EINVAL);
6894 		}
6895 		if ((at + incr) > limit) {
6896 			return (EINVAL);
6897 		}
6898 		SCTP_INP_INCR_REF(inp);
6899 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6900 		if (stcb != NULL) {
6901 			SCTP_TCB_UNLOCK(stcb);
6902 			return (EALREADY);
6903 		} else {
6904 			SCTP_INP_DECR_REF(inp);
6905 		}
6906 		at += incr;
6907 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6908 	}
6909 	return (0);
6910 }
6911 
6912 /*
6913  * sctp_bindx(ADD) for one address.
6914  * assumes all arguments are valid/checked by caller.
6915  */
6916 void
6917 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6918     struct sockaddr *sa, sctp_assoc_t assoc_id,
6919     uint32_t vrf_id, int *error, void *p)
6920 {
6921 	struct sockaddr *addr_touse;
6922 #if defined(INET) && defined(INET6)
6923 	struct sockaddr_in sin;
6924 #endif
6925 
6926 	/* see if we're bound all already! */
6927 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6928 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6929 		*error = EINVAL;
6930 		return;
6931 	}
6932 	addr_touse = sa;
6933 #ifdef INET6
6934 	if (sa->sa_family == AF_INET6) {
6935 #ifdef INET
6936 		struct sockaddr_in6 *sin6;
6937 #endif
6938 
6939 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6940 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6941 			*error = EINVAL;
6942 			return;
6943 		}
6944 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6945 			/* can only bind v6 on PF_INET6 sockets */
6946 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6947 			*error = EINVAL;
6948 			return;
6949 		}
6950 #ifdef INET
6951 		sin6 = (struct sockaddr_in6 *)addr_touse;
6952 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6953 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6954 			    SCTP_IPV6_V6ONLY(inp)) {
6955 				/* can't bind v4-mapped addrs on IPv6-only sockets */
6956 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6957 				*error = EINVAL;
6958 				return;
6959 			}
6960 			in6_sin6_2_sin(&sin, sin6);
6961 			addr_touse = (struct sockaddr *)&sin;
6962 		}
6963 #endif
6964 	}
6965 #endif
6966 #ifdef INET
6967 	if (sa->sa_family == AF_INET) {
6968 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6969 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6970 			*error = EINVAL;
6971 			return;
6972 		}
6973 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6974 		    SCTP_IPV6_V6ONLY(inp)) {
6975 			/* can't bind v4 addrs on IPv6-only sockets */
6976 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6977 			*error = EINVAL;
6978 			return;
6979 		}
6980 	}
6981 #endif
6982 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6983 		if (p == NULL) {
6984 			/* Can't get proc for Net/Open BSD */
6985 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6986 			*error = EINVAL;
6987 			return;
6988 		}
6989 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6990 		return;
6991 	}
6992 	/*
6993 	 * No locks required here since bind and mgmt_ep_sa all do their own
6994 	 * locking. If we do something for the FIX: below we may need to
6995 	 * lock in that case.
6996 	 */
6997 	if (assoc_id == 0) {
6998 		/* add the address */
6999 		struct sctp_inpcb *lep;
7000 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
7001 
7002 		/* validate the incoming port */
7003 		if ((lsin->sin_port != 0) &&
7004 		    (lsin->sin_port != inp->sctp_lport)) {
7005 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7006 			*error = EINVAL;
7007 			return;
7008 		} else {
7009 			/* user specified 0 port, set it to existing port */
7010 			lsin->sin_port = inp->sctp_lport;
7011 		}
7012 
7013 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
7014 		if (lep != NULL) {
7015 			/*
7016 			 * We must decrement the refcount since we have the
7017 			 * ep already and are binding. No remove going on
7018 			 * here.
7019 			 */
7020 			SCTP_INP_DECR_REF(lep);
7021 		}
7022 		if (lep == inp) {
7023 			/* already bound to it, ok */
7024 			return;
7025 		} else if (lep == NULL) {
7026 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
7027 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
7028 			    SCTP_ADD_IP_ADDRESS,
7029 			    vrf_id, NULL);
7030 		} else {
7031 			*error = EADDRINUSE;
7032 		}
7033 		if (*error)
7034 			return;
7035 	} else {
7036 		/*
7037 		 * FIX: decide whether we allow assoc based bindx
7038 		 */
7039 	}
7040 }
7041 
7042 /*
7043  * sctp_bindx(DELETE) for one address.
7044  * assumes all arguments are valid/checked by caller.
7045  */
7046 void
7047 sctp_bindx_delete_address(struct sctp_inpcb *inp,
7048     struct sockaddr *sa, sctp_assoc_t assoc_id,
7049     uint32_t vrf_id, int *error)
7050 {
7051 	struct sockaddr *addr_touse;
7052 #if defined(INET) && defined(INET6)
7053 	struct sockaddr_in sin;
7054 #endif
7055 
7056 	/* see if we're bound all already! */
7057 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
7058 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7059 		*error = EINVAL;
7060 		return;
7061 	}
7062 	addr_touse = sa;
7063 #ifdef INET6
7064 	if (sa->sa_family == AF_INET6) {
7065 #ifdef INET
7066 		struct sockaddr_in6 *sin6;
7067 #endif
7068 
7069 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
7070 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7071 			*error = EINVAL;
7072 			return;
7073 		}
7074 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
7075 			/* can only bind v6 on PF_INET6 sockets */
7076 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7077 			*error = EINVAL;
7078 			return;
7079 		}
7080 #ifdef INET
7081 		sin6 = (struct sockaddr_in6 *)addr_touse;
7082 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
7083 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7084 			    SCTP_IPV6_V6ONLY(inp)) {
7085 				/* can't bind v4-mapped addrs on IPv6-only sockets */
7086 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7087 				*error = EINVAL;
7088 				return;
7089 			}
7090 			in6_sin6_2_sin(&sin, sin6);
7091 			addr_touse = (struct sockaddr *)&sin;
7092 		}
7093 #endif
7094 	}
7095 #endif
7096 #ifdef INET
7097 	if (sa->sa_family == AF_INET) {
7098 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
7099 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7100 			*error = EINVAL;
7101 			return;
7102 		}
7103 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7104 		    SCTP_IPV6_V6ONLY(inp)) {
7105 			/* can't bind v4 addrs on IPv6-only sockets */
7106 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7107 			*error = EINVAL;
7108 			return;
7109 		}
7110 	}
7111 #endif
7112 	/*
7113 	 * No lock required; mgmt_ep_sa does its own locking. If the FIX:
7114 	 * below is ever changed we may need to lock before calling
7115 	 * association level binding.
7116 	 */
7117 	if (assoc_id == 0) {
7118 		/* delete the address */
7119 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
7120 		    SCTP_DEL_IP_ADDRESS,
7121 		    vrf_id, NULL);
7122 	} else {
7123 		/*
7124 		 * FIX: decide whether we allow assoc based bindx
7125 		 */
7126 	}
7127 }
7128 
7129 /*
7130  * returns the valid local address count for an assoc, taking into account
7131  * all scoping rules
7132  */
7133 int
7134 sctp_local_addr_count(struct sctp_tcb *stcb)
7135 {
7136 	int loopback_scope;
7137 #if defined(INET)
7138 	int ipv4_local_scope, ipv4_addr_legal;
7139 #endif
7140 #if defined (INET6)
7141 	int local_scope, site_scope, ipv6_addr_legal;
7142 #endif
7143 	struct sctp_vrf *vrf;
7144 	struct sctp_ifn *sctp_ifn;
7145 	struct sctp_ifa *sctp_ifa;
7146 	int count = 0;
7147 
7148 	/* Turn on all the appropriate scopes */
7149 	loopback_scope = stcb->asoc.scope.loopback_scope;
7150 #if defined(INET)
7151 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
7152 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
7153 #endif
7154 #if defined(INET6)
7155 	local_scope = stcb->asoc.scope.local_scope;
7156 	site_scope = stcb->asoc.scope.site_scope;
7157 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
7158 #endif
7159 	SCTP_IPI_ADDR_RLOCK();
7160 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
7161 	if (vrf == NULL) {
7162 		/* no vrf, no addresses */
7163 		SCTP_IPI_ADDR_RUNLOCK();
7164 		return (0);
7165 	}
7166 
7167 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
7168 		/*
7169 		 * bound all case: go through all ifns on the vrf
7170 		 */
7171 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
7172 			if ((loopback_scope == 0) &&
7173 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
7174 				continue;
7175 			}
7176 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
7177 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
7178 					continue;
7179 				switch (sctp_ifa->address.sa.sa_family) {
7180 #ifdef INET
7181 				case AF_INET:
7182 					if (ipv4_addr_legal) {
7183 						struct sockaddr_in *sin;
7184 
7185 						sin = &sctp_ifa->address.sin;
7186 						if (sin->sin_addr.s_addr == 0) {
7187 							/*
7188 							 * skip unspecified
7189 							 * addrs
7190 							 */
7191 							continue;
7192 						}
7193 						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
7194 						    &sin->sin_addr) != 0) {
7195 							continue;
7196 						}
7197 						if ((ipv4_local_scope == 0) &&
7198 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
7199 							continue;
7200 						}
7201 						/* count this one */
7202 						count++;
7203 					} else {
7204 						continue;
7205 					}
7206 					break;
7207 #endif
7208 #ifdef INET6
7209 				case AF_INET6:
7210 					if (ipv6_addr_legal) {
7211 						struct sockaddr_in6 *sin6;
7212 
7213 						sin6 = &sctp_ifa->address.sin6;
7214 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
7215 							continue;
7216 						}
7217 						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
7218 						    &sin6->sin6_addr) != 0) {
7219 							continue;
7220 						}
7221 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
7222 							if (local_scope == 0)
7223 								continue;
7224 							if (sin6->sin6_scope_id == 0) {
7225 								if (sa6_recoverscope(sin6) != 0)
7226 									/* bad link local address */
7235 									continue;
7236 							}
7237 						}
7238 						if ((site_scope == 0) &&
7239 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
7240 							continue;
7241 						}
7242 						/* count this one */
7243 						count++;
7244 					}
7245 					break;
7246 #endif
7247 				default:
7248 					/* TSNH */
7249 					break;
7250 				}
7251 			}
7252 		}
7253 	} else {
7254 		/*
7255 		 * subset bound case
7256 		 */
7257 		struct sctp_laddr *laddr;
7258 
7259 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
7260 		    sctp_nxt_addr) {
7261 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
7262 				continue;
7263 			}
7264 			/* count this one */
7265 			count++;
7266 		}
7267 	}
7268 	SCTP_IPI_ADDR_RUNLOCK();
7269 	return (count);
7270 }
7271 
7272 #if defined(SCTP_LOCAL_TRACE_BUF)
7273 
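/*
 * Append an entry to the global SCTP trace ring buffer.  The next slot is
 * reserved lock-free with an atomic_cmpset_int() loop; the index wraps
 * back to the start once it reaches SCTP_MAX_LOGGING_SIZE.
 */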
7274 void
7275 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
7276 {
7277 	uint32_t saveindex, newindex;
7278 
7279 	do {
7280 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
7281 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
7282 			newindex = 1;
7283 		} else {
7284 			newindex = saveindex + 1;
7285 		}
7286 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
7287 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
7288 		saveindex = 0;
7289 	}
7290 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
7291 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
7292 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
7293 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
7294 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
7295 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
7296 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
7297 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
7298 }
7299 
7300 #endif
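
/*
 * Receive callback installed via udp_set_kernel_tunneling().  The mbuf
 * still carries the outer IP header at offset 0 and the UDP header at
 * 'off'.  Strip the UDP header, adjust the IP/IPv6 payload length and hand
 * the remaining SCTP packet to sctp_input_with_port() or
 * sctp6_input_with_port(), tagged with the UDP source port.
 */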
7301 static void
7302 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
7303     const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
7304 {
7305 	struct ip *iph;
7306 #ifdef INET6
7307 	struct ip6_hdr *ip6;
7308 #endif
7309 	struct mbuf *sp, *last;
7310 	struct udphdr *uhdr;
7311 	uint16_t port;
7312 
7313 	if ((m->m_flags & M_PKTHDR) == 0) {
7314 		/* Can't handle one that is not a pkt hdr */
7315 		goto out;
7316 	}
7317 	/* Pull the src port */
7318 	iph = mtod(m, struct ip *);
7319 	uhdr = (struct udphdr *)((caddr_t)iph + off);
7320 	port = uhdr->uh_sport;
7321 	/*
7322 	 * Split out the mbuf chain. Leave the IP header in m, place the
7323 	 * rest in the sp.
7324 	 */
7325 	sp = m_split(m, off, M_NOWAIT);
7326 	if (sp == NULL) {
7327 		/* Gak, drop packet, we can't do a split */
7328 		goto out;
7329 	}
7330 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
7331 		/* Gak, packet can't have an SCTP header in it - too small */
7332 		m_freem(sp);
7333 		goto out;
7334 	}
7335 	/* Now pull up the UDP header and SCTP header together */
7336 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
7337 	if (sp == NULL) {
7338 		/* Gak pullup failed */
7339 		goto out;
7340 	}
7341 	/* Trim out the UDP header */
7342 	m_adj(sp, sizeof(struct udphdr));
7343 
7344 	/* Now reconstruct the mbuf chain */
7345 	for (last = m; last->m_next; last = last->m_next);
7346 	last->m_next = sp;
7347 	m->m_pkthdr.len += sp->m_pkthdr.len;
7348 	/*
7349 	 * The CSUM_DATA_VALID flag indicates that the HW checked the UDP
7350 	 * checksum and it was valid. Since CSUM_DATA_VALID ==
7351 	 * CSUM_SCTP_VALID this would imply that the HW also verified the
7352 	 * SCTP checksum. Therefore, clear the bit.
7353 	 */
7354 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
7355 	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
7356 	    m->m_pkthdr.len,
7357 	    if_name(m->m_pkthdr.rcvif),
7358 	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
7359 	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
7360 	iph = mtod(m, struct ip *);
7361 	switch (iph->ip_v) {
7362 #ifdef INET
7363 	case IPVERSION:
7364 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
7365 		sctp_input_with_port(m, off, port);
7366 		break;
7367 #endif
7368 #ifdef INET6
7369 	case IPV6_VERSION >> 4:
7370 		ip6 = mtod(m, struct ip6_hdr *);
7371 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
7372 		sctp6_input_with_port(&m, &off, port);
7373 		break;
7374 #endif
7375 	default:
7376 		goto out;
7377 		break;
7378 	}
7379 	return;
7380 out:
7381 	m_freem(m);
7382 }
7383 
7384 #ifdef INET
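/*
 * ICMP error handler for SCTP over UDP (IPv4).  Locate the association
 * from the inner IP header embedded in the ICMP message, check the UDP
 * port numbers and the verification tag (or, for a reflected INIT, the
 * initiate tag) and then pass the error on to sctp_notify().
 */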
7385 static void
7386 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
7387 {
7388 	struct ip *outer_ip, *inner_ip;
7389 	struct sctphdr *sh;
7390 	struct icmp *icmp;
7391 	struct udphdr *udp;
7392 	struct sctp_inpcb *inp;
7393 	struct sctp_tcb *stcb;
7394 	struct sctp_nets *net;
7395 	struct sctp_init_chunk *ch;
7396 	struct sockaddr_in src, dst;
7397 	uint8_t type, code;
7398 
7399 	inner_ip = (struct ip *)vip;
7400 	icmp = (struct icmp *)((caddr_t)inner_ip -
7401 	    (sizeof(struct icmp) - sizeof(struct ip)));
7402 	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
7403 	if (ntohs(outer_ip->ip_len) <
7404 	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
7405 		return;
7406 	}
7407 	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
7408 	sh = (struct sctphdr *)(udp + 1);
7409 	memset(&src, 0, sizeof(struct sockaddr_in));
7410 	src.sin_family = AF_INET;
7411 	src.sin_len = sizeof(struct sockaddr_in);
7412 	src.sin_port = sh->src_port;
7413 	src.sin_addr = inner_ip->ip_src;
7414 	memset(&dst, 0, sizeof(struct sockaddr_in));
7415 	dst.sin_family = AF_INET;
7416 	dst.sin_len = sizeof(struct sockaddr_in);
7417 	dst.sin_port = sh->dest_port;
7418 	dst.sin_addr = inner_ip->ip_dst;
7419 	/*
7420 	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
7421 	 * holds our local endpoint address. Thus we reverse the dst and the
7422 	 * src in the lookup.
7423 	 */
7424 	inp = NULL;
7425 	net = NULL;
7426 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7427 	    (struct sockaddr *)&src,
7428 	    &inp, &net, 1,
7429 	    SCTP_DEFAULT_VRFID);
7430 	if ((stcb != NULL) &&
7431 	    (net != NULL) &&
7432 	    (inp != NULL)) {
7433 		/* Check the UDP port numbers */
7434 		if ((udp->uh_dport != net->port) ||
7435 		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7436 			SCTP_TCB_UNLOCK(stcb);
7437 			return;
7438 		}
7439 		/* Check the verification tag */
7440 		if (ntohl(sh->v_tag) != 0) {
7441 			/*
7442 			 * This must be the verification tag used for
7443 			 * sending out packets. We don't consider packets
7444 			 * reflecting the verification tag.
7445 			 */
7446 			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
7447 				SCTP_TCB_UNLOCK(stcb);
7448 				return;
7449 			}
7450 		} else {
7451 			if (ntohs(outer_ip->ip_len) >=
7452 			    sizeof(struct ip) +
7453 			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
7454 				/*
7455 				 * In this case we can check if we got an
7456 				 * INIT chunk and if the initiate tag
7457 				 * matches.
7458 				 */
7459 				ch = (struct sctp_init_chunk *)(sh + 1);
7460 				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
7461 				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
7462 					SCTP_TCB_UNLOCK(stcb);
7463 					return;
7464 				}
7465 			} else {
7466 				SCTP_TCB_UNLOCK(stcb);
7467 				return;
7468 			}
7469 		}
7470 		type = icmp->icmp_type;
7471 		code = icmp->icmp_code;
7472 		if ((type == ICMP_UNREACH) &&
7473 		    (code == ICMP_UNREACH_PORT)) {
7474 			code = ICMP_UNREACH_PROTOCOL;
7475 		}
7476 		sctp_notify(inp, stcb, net, type, code,
7477 		    ntohs(inner_ip->ip_len),
7478 		    (uint32_t)ntohs(icmp->icmp_nextmtu));
7479 	} else {
7480 		if ((stcb == NULL) && (inp != NULL)) {
7481 			/* reduce ref-count */
7482 			SCTP_INP_WLOCK(inp);
7483 			SCTP_INP_DECR_REF(inp);
7484 			SCTP_INP_WUNLOCK(inp);
7485 		}
7486 		if (stcb) {
7487 			SCTP_TCB_UNLOCK(stcb);
7488 		}
7489 	}
7490 	return;
7491 }
7492 #endif
7493 
7494 #ifdef INET6
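/*
 * ICMPv6 counterpart of sctp_recv_icmp_tunneled_packet(): the same lookup
 * and validation, but working on copies made with m_copydata(), and with
 * "port unreachable" mapped to a parameter problem before sctp6_notify().
 */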
7495 static void
7496 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
7497 {
7498 	struct ip6ctlparam *ip6cp;
7499 	struct sctp_inpcb *inp;
7500 	struct sctp_tcb *stcb;
7501 	struct sctp_nets *net;
7502 	struct sctphdr sh;
7503 	struct udphdr udp;
7504 	struct sockaddr_in6 src, dst;
7505 	uint8_t type, code;
7506 
7507 	ip6cp = (struct ip6ctlparam *)d;
7508 	/*
7509 	 * XXX: We assume that when ip6c_ip6 is non-NULL, ip6c_m and ip6c_off
7510 	 * are valid.
7510 	 */
7511 	if (ip6cp->ip6c_m == NULL) {
7512 		return;
7513 	}
7514 	/*
7515 	 * Check if we can safely examine the ports and the verification tag
7516 	 * of the SCTP common header.
7517 	 */
7518 	if (ip6cp->ip6c_m->m_pkthdr.len <
7519 	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
7520 		return;
7521 	}
7522 	/* Copy out the UDP header. */
7523 	memset(&udp, 0, sizeof(struct udphdr));
7524 	m_copydata(ip6cp->ip6c_m,
7525 	    ip6cp->ip6c_off,
7526 	    sizeof(struct udphdr),
7527 	    (caddr_t)&udp);
7528 	/* Copy out the port numbers and the verification tag. */
7529 	memset(&sh, 0, sizeof(struct sctphdr));
7530 	m_copydata(ip6cp->ip6c_m,
7531 	    ip6cp->ip6c_off + sizeof(struct udphdr),
7532 	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
7533 	    (caddr_t)&sh);
7534 	memset(&src, 0, sizeof(struct sockaddr_in6));
7535 	src.sin6_family = AF_INET6;
7536 	src.sin6_len = sizeof(struct sockaddr_in6);
7537 	src.sin6_port = sh.src_port;
7538 	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
7539 	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7540 		return;
7541 	}
7542 	memset(&dst, 0, sizeof(struct sockaddr_in6));
7543 	dst.sin6_family = AF_INET6;
7544 	dst.sin6_len = sizeof(struct sockaddr_in6);
7545 	dst.sin6_port = sh.dest_port;
7546 	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
7547 	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7548 		return;
7549 	}
7550 	inp = NULL;
7551 	net = NULL;
7552 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7553 	    (struct sockaddr *)&src,
7554 	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
7555 	if ((stcb != NULL) &&
7556 	    (net != NULL) &&
7557 	    (inp != NULL)) {
7558 		/* Check the UDP port numbers */
7559 		if ((udp.uh_dport != net->port) ||
7560 		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7561 			SCTP_TCB_UNLOCK(stcb);
7562 			return;
7563 		}
7564 		/* Check the verification tag */
7565 		if (ntohl(sh.v_tag) != 0) {
7566 			/*
7567 			 * This must be the verification tag used for
7568 			 * sending out packets. We don't consider packets
7569 			 * reflecting the verification tag.
7570 			 */
7571 			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
7572 				SCTP_TCB_UNLOCK(stcb);
7573 				return;
7574 			}
7575 		} else {
7576 			if (ip6cp->ip6c_m->m_pkthdr.len >=
7577 			    ip6cp->ip6c_off + sizeof(struct udphdr) +
7578 			    sizeof(struct sctphdr) +
7579 			    sizeof(struct sctp_chunkhdr) +
7580 			    offsetof(struct sctp_init, a_rwnd)) {
7581 				/*
7582 				 * In this case we can check if we got an
7583 				 * INIT chunk and if the initiate tag
7584 				 * matches.
7585 				 */
7586 				uint32_t initiate_tag;
7587 				uint8_t chunk_type;
7588 
7589 				m_copydata(ip6cp->ip6c_m,
7590 				    ip6cp->ip6c_off +
7591 				    sizeof(struct udphdr) +
7592 				    sizeof(struct sctphdr),
7593 				    sizeof(uint8_t),
7594 				    (caddr_t)&chunk_type);
7595 				m_copydata(ip6cp->ip6c_m,
7596 				    ip6cp->ip6c_off +
7597 				    sizeof(struct udphdr) +
7598 				    sizeof(struct sctphdr) +
7599 				    sizeof(struct sctp_chunkhdr),
7600 				    sizeof(uint32_t),
7601 				    (caddr_t)&initiate_tag);
7602 				if ((chunk_type != SCTP_INITIATION) ||
7603 				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
7604 					SCTP_TCB_UNLOCK(stcb);
7605 					return;
7606 				}
7607 			} else {
7608 				SCTP_TCB_UNLOCK(stcb);
7609 				return;
7610 			}
7611 		}
7612 		type = ip6cp->ip6c_icmp6->icmp6_type;
7613 		code = ip6cp->ip6c_icmp6->icmp6_code;
7614 		if ((type == ICMP6_DST_UNREACH) &&
7615 		    (code == ICMP6_DST_UNREACH_NOPORT)) {
7616 			type = ICMP6_PARAM_PROB;
7617 			code = ICMP6_PARAMPROB_NEXTHEADER;
7618 		}
7619 		sctp6_notify(inp, stcb, net, type, code,
7620 		    ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
7621 	} else {
7622 		if ((stcb == NULL) && (inp != NULL)) {
7623 			/* reduce inp's ref-count */
7624 			SCTP_INP_WLOCK(inp);
7625 			SCTP_INP_DECR_REF(inp);
7626 			SCTP_INP_WUNLOCK(inp);
7627 		}
7628 		if (stcb) {
7629 			SCTP_TCB_UNLOCK(stcb);
7630 		}
7631 	}
7632 }
7633 #endif
7634 
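/* Tear down the kernel sockets used for SCTP over UDP encapsulation. */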
7635 void
7636 sctp_over_udp_stop(void)
7637 {
7638 	/*
7639 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
7640 	 * for writing!
7641 	 */
7642 #ifdef INET
7643 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7644 		soclose(SCTP_BASE_INFO(udp4_tun_socket));
7645 		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
7646 	}
7647 #endif
7648 #ifdef INET6
7649 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7650 		soclose(SCTP_BASE_INFO(udp6_tun_socket));
7651 		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
7652 	}
7653 #endif
7654 }
7655 
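/*
 * Set up SCTP over UDP encapsulation: create a kernel UDP socket per
 * address family, register the tunneling and ICMP callbacks with
 * udp_set_kernel_tunneling(), and bind to the sysctl-configured port.
 * Any failure tears everything down again via sctp_over_udp_stop().
 */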
7656 int
7657 sctp_over_udp_start(void)
7658 {
7659 	uint16_t port;
7660 	int ret;
7661 #ifdef INET
7662 	struct sockaddr_in sin;
7663 #endif
7664 #ifdef INET6
7665 	struct sockaddr_in6 sin6;
7666 #endif
7667 	/*
7668 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
7669 	 * for writing!
7670 	 */
7671 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7672 	if (ntohs(port) == 0) {
7673 		/* Must have a port set */
7674 		return (EINVAL);
7675 	}
7676 #ifdef INET
7677 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7678 		/* Already running -- must stop first */
7679 		return (EALREADY);
7680 	}
7681 #endif
7682 #ifdef INET6
7683 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7684 		/* Already running -- must stop first */
7685 		return (EALREADY);
7686 	}
7687 #endif
7688 #ifdef INET
7689 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7690 	    SOCK_DGRAM, IPPROTO_UDP,
7691 	    curthread->td_ucred, curthread))) {
7692 		sctp_over_udp_stop();
7693 		return (ret);
7694 	}
7695 	/* Call the special UDP hook. */
7696 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7697 	    sctp_recv_udp_tunneled_packet,
7698 	    sctp_recv_icmp_tunneled_packet,
7699 	    NULL))) {
7700 		sctp_over_udp_stop();
7701 		return (ret);
7702 	}
7703 	/* Ok, we have a socket, bind it to the port. */
7704 	memset(&sin, 0, sizeof(struct sockaddr_in));
7705 	sin.sin_len = sizeof(struct sockaddr_in);
7706 	sin.sin_family = AF_INET;
7707 	sin.sin_port = htons(port);
7708 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7709 	    (struct sockaddr *)&sin, curthread))) {
7710 		sctp_over_udp_stop();
7711 		return (ret);
7712 	}
7713 #endif
7714 #ifdef INET6
7715 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7716 	    SOCK_DGRAM, IPPROTO_UDP,
7717 	    curthread->td_ucred, curthread))) {
7718 		sctp_over_udp_stop();
7719 		return (ret);
7720 	}
7721 	/* Call the special UDP hook. */
7722 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7723 	    sctp_recv_udp_tunneled_packet,
7724 	    sctp_recv_icmp6_tunneled_packet,
7725 	    NULL))) {
7726 		sctp_over_udp_stop();
7727 		return (ret);
7728 	}
7729 	/* Ok, we have a socket, bind it to the port. */
7730 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7731 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7732 	sin6.sin6_family = AF_INET6;
7733 	sin6.sin6_port = htons(port);
7734 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7735 	    (struct sockaddr *)&sin6, curthread))) {
7736 		sctp_over_udp_stop();
7737 		return (ret);
7738 	}
7739 #endif
7740 	return (0);
7741 }
7742 
7743 /*
7744  * sctp_min_mtu() returns the minimum of all non-zero arguments.
7745  * If all arguments are zero, zero is returned.
7746  */
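/*
 * For example, sctp_min_mtu(1500, 0, 1280) yields 1280 and
 * sctp_min_mtu(0, 0, 0) yields 0.
 */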
7747 uint32_t
7748 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
7749 {
7750 	if (mtu1 > 0) {
7751 		if (mtu2 > 0) {
7752 			if (mtu3 > 0) {
7753 				return (min(mtu1, min(mtu2, mtu3)));
7754 			} else {
7755 				return (min(mtu1, mtu2));
7756 			}
7757 		} else {
7758 			if (mtu3 > 0) {
7759 				return (min(mtu1, mtu3));
7760 			} else {
7761 				return (mtu1);
7762 			}
7763 		}
7764 	} else {
7765 		if (mtu2 > 0) {
7766 			if (mtu3 > 0) {
7767 				return (min(mtu2, mtu3));
7768 			} else {
7769 				return (mtu2);
7770 			}
7771 		} else {
7772 			return (mtu3);
7773 		}
7774 	}
7775 }
7776 
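/*
 * Store the path MTU for a peer address in the shared TCP host cache, keyed
 * by the peer address and FIB; sctp_hc_get_mtu() below performs the
 * corresponding lookup.
 */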
7777 void
7778 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
7779 {
7780 	struct in_conninfo inc;
7781 
7782 	memset(&inc, 0, sizeof(struct in_conninfo));
7783 	inc.inc_fibnum = fibnum;
7784 	switch (addr->sa.sa_family) {
7785 #ifdef INET
7786 	case AF_INET:
7787 		inc.inc_faddr = addr->sin.sin_addr;
7788 		break;
7789 #endif
7790 #ifdef INET6
7791 	case AF_INET6:
7792 		inc.inc_flags |= INC_ISIPV6;
7793 		inc.inc6_faddr = addr->sin6.sin6_addr;
7794 		break;
7795 #endif
7796 	default:
7797 		return;
7798 	}
7799 	tcp_hc_updatemtu(&inc, (u_long)mtu);
7800 }
7801 
7802 uint32_t
7803 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
7804 {
7805 	struct in_conninfo inc;
7806 
7807 	memset(&inc, 0, sizeof(struct in_conninfo));
7808 	inc.inc_fibnum = fibnum;
7809 	switch (addr->sa.sa_family) {
7810 #ifdef INET
7811 	case AF_INET:
7812 		inc.inc_faddr = addr->sin.sin_addr;
7813 		break;
7814 #endif
7815 #ifdef INET6
7816 	case AF_INET6:
7817 		inc.inc_flags |= INC_ISIPV6;
7818 		inc.inc6_faddr = addr->sin6.sin6_addr;
7819 		break;
7820 #endif
7821 	default:
7822 		return (0);
7823 	}
7824 	return ((uint32_t)tcp_hc_getmtu(&inc));
7825 }
7826 
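/*
 * Replace the association's base state while preserving the substate bits.
 * Entering any of the SHUTDOWN states clears SCTP_STATE_SHUTDOWN_PENDING,
 * and, when built with KDTRACE_HOOKS, a state__change probe fires on real
 * transitions.
 */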
7827 void
7828 sctp_set_state(struct sctp_tcb *stcb, int new_state)
7829 {
7830 #if defined(KDTRACE_HOOKS)
7831 	int old_state = stcb->asoc.state;
7832 #endif
7833 
7834 	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
7835 	    ("sctp_set_state: Can't set substate (new_state = %x)",
7836 	    new_state));
7837 	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
7838 	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
7839 	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
7840 	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
7841 		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
7842 	}
7843 #if defined(KDTRACE_HOOKS)
7844 	if (((old_state & SCTP_STATE_MASK) != new_state) &&
7845 	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
7846 	    (new_state == SCTP_STATE_INUSE))) {
7847 		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
7848 	}
7849 #endif
7850 }
7851 
7852 void
7853 sctp_add_substate(struct sctp_tcb *stcb, int substate)
7854 {
7855 #if defined(KDTRACE_HOOKS)
7856 	int old_state = stcb->asoc.state;
7857 #endif
7858 
7859 	KASSERT((substate & SCTP_STATE_MASK) == 0,
7860 	    ("sctp_add_substate: Can't set state (substate = %x)",
7861 	    substate));
7862 	stcb->asoc.state |= substate;
7863 #if defined(KDTRACE_HOOKS)
7864 	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
7865 	    ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
7866 	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
7867 	    ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
7868 		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
7869 	}
7870 #endif
7871 }
7872