xref: /freebsd/sys/netinet/sctputil.c (revision 9b5631807ebc64e1fdfd2b23e402d79aec6b47c5)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #include <netinet6/sctp6_var.h>
45 #endif
46 #include <netinet/sctp_header.h>
47 #include <netinet/sctp_output.h>
48 #include <netinet/sctp_uio.h>
49 #include <netinet/sctp_timer.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_auth.h>
52 #include <netinet/sctp_asconf.h>
53 #include <netinet/sctp_bsd_addr.h>
54 #include <netinet/sctp_kdtrace.h>
55 #if defined(INET6) || defined(INET)
56 #include <netinet/tcp_var.h>
57 #endif
58 #include <netinet/udp.h>
59 #include <netinet/udp_var.h>
60 #include <sys/proc.h>
61 #ifdef INET6
62 #include <netinet/icmp6.h>
63 #endif
64 
65 
66 #ifndef KTR_SCTP
67 #define KTR_SCTP KTR_SUBSYS
68 #endif
69 
70 extern const struct sctp_cc_functions sctp_cc_functions[];
71 extern const struct sctp_ss_functions sctp_ss_functions[];
72 
void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Zero the whole log entry first: the SCTP_CTR6 call below reads the
	 * entry through the x.misc union overlay, which can be wider than the
	 * x.sb member, so unset bytes would otherwise leak uninitialized
	 * stack data into the trace (same pattern as rto_logging()).
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
95 
void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so the x.misc union overlay never logs stack garbage. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	/* NOTE(review): inp is dereferenced unconditionally here while stcb
	 * is NULL-checked — callers apparently guarantee inp != NULL. */
	sctp_clog.x.close.inp = (void *)inp;
	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
	if (stcb) {
		sctp_clog.x.close.stcb = (void *)stcb;
		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
	} else {
		sctp_clog.x.close.stcb = 0;
		sctp_clog.x.close.state = 0;
	}
	sctp_clog.x.close.loc = loc;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CLOSE,
	    0,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
121 
void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log clog;

	/* Record the smoothed RTT (in ms) for this destination. */
	memset(&clog, 0, sizeof(clog));
	clog.x.rto.net = (void *)net;
	clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT, from,
	    clog.x.misc.log1, clog.x.misc.log2,
	    clog.x.misc.log3, clog.x.misc.log4);
#endif
}
140 
void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so the x.misc union overlay never logs stack garbage. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.strlog.stcb = stcb;
	sctp_clog.x.strlog.n_tsn = tsn;
	sctp_clog.x.strlog.n_sseq = sseq;
	sctp_clog.x.strlog.e_tsn = 0;
	sctp_clog.x.strlog.e_sseq = 0;
	sctp_clog.x.strlog.strm = stream;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
162 
void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so the x.misc union overlay never logs stack garbage. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
183 
void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so the x.misc union overlay never logs stack garbage. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.sack.cumack = cumack;
	sctp_clog.x.sack.oldcumack = old_cumack;
	sctp_clog.x.sack.tsn = tsn;
	sctp_clog.x.sack.numGaps = gaps;
	sctp_clog.x.sack.numDups = dups;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SACK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
204 
void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log clog;

	/* Snapshot the mapping-array state: base, cumulative ack, highest. */
	memset(&clog, 0, sizeof(clog));
	clog.x.map.base = map;
	clog.x.map.cum = cum;
	clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP, from,
	    clog.x.misc.log1, clog.x.misc.log2,
	    clog.x.misc.log3, clog.x.misc.log4);
#endif
}
224 
void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log clog;

	/* Snapshot fast-retransmit decision inputs. */
	memset(&clog, 0, sizeof(clog));
	clog.x.fr.largest_tsn = biggest_tsn;
	clog.x.fr.largest_new_tsn = biggest_new_tsn;
	clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR, from,
	    clog.x.misc.log1, clog.x.misc.log2,
	    clog.x.misc.log3, clog.x.misc.log4);
#endif
}
244 
245 #ifdef SCTP_MBUF_LOGGING
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so the x.misc union overlay never logs stack garbage. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
272 
273 void
274 sctp_log_mbc(struct mbuf *m, int from)
275 {
276 	struct mbuf *mat;
277 
278 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
279 		sctp_log_mb(mat, from);
280 	}
281 }
282 #endif
283 
void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	/* Zero first so the x.misc union overlay never logs stack garbage. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
314 
void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Zero the entry first: when net == NULL the cwnd/inflight/pseudo
	 * cumack fields are never assigned below, yet the trace call still
	 * reads them through the x.misc union overlay — without the memset
	 * those would be uninitialized stack bytes.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

	if (net) {
		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
		sctp_clog.x.cwnd.inflight = net->flight_size;
		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
	}
	if (SCTP_CWNDLOG_PRESEND == from) {
		/* For pre-send events this field carries the peer's rwnd. */
		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
	}
	sctp_clog.x.cwnd.cwnd_augment = augment;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
351 
/*
 * Snapshot the ownership state of every lock relevant to this inp/stcb pair
 * into one trace record.  Either pointer may be NULL; unknown locks are
 * recorded as SCTP_LOCK_UNKNOWN.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both sample
		 * so_rcv.sb_mtx — verify whether sock_lock was meant to
		 * sample the socket's own lock instead.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
397 
void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	/*
	 * Guard the dereference like sctp_log_cwnd() does; the memset above
	 * already left inflight at 0 for a NULL net.
	 */
	if (net != NULL) {
		sctp_clog.x.cwnd.inflight = net->flight_size;
	}
	sctp_clog.x.cwnd.cwnd_augment = burst;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
426 
void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so the x.misc union overlay never logs stack garbage. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = snd_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = 0;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
446 
void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so the x.misc union overlay never logs stack garbage. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = flight_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
466 
467 #ifdef SCTP_MBCNT_LOGGING
static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so the x.misc union overlay never logs stack garbage. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.mbcnt.total_queue_size = total_oq;
	sctp_clog.x.mbcnt.size_change = book;
	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBCNT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
487 #endif
488 
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	/*
	 * Log four caller-supplied words verbatim; no staging struct is
	 * needed since the values are already in trace form.
	 */
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT, from, a, b, c, d);
#endif
}
499 
void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so the x.misc union overlay never logs stack garbage. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.wake.stcb = (void *)stcb;
	sctp_clog.x.wake.wake_cnt = wake_cnt;
	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

	/* Queue counts are clamped to a byte for the trace record. */
	if (stcb->asoc.stream_queue_cnt < 0xff)
		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
	else
		sctp_clog.x.wake.stream_qcnt = 0xff;

	if (stcb->asoc.chunks_on_out_queue < 0xff)
		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
	else
		sctp_clog.x.wake.chunks_on_oque = 0xff;

	sctp_clog.x.wake.sctpflags = 0;
	/* set in the deferred mode stuff */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
		sctp_clog.x.wake.sctpflags |= 1;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
		sctp_clog.x.wake.sctpflags |= 2;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
		sctp_clog.x.wake.sctpflags |= 4;
	/* what about the sb */
	if (stcb->sctp_socket) {
		struct socket *so = stcb->sctp_socket;

		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
	} else {
		/* 0xff marks "no socket attached". */
		sctp_clog.x.wake.sbflags = 0xff;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_WAKE,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
547 
548 void
549 sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
550 {
551 #if defined(SCTP_LOCAL_TRACE_BUF)
552 	struct sctp_cwnd_log sctp_clog;
553 
554 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
555 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
556 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
557 	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
558 	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
559 	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
560 	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
561 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
562 	    SCTP_LOG_EVENT_BLOCK,
563 	    from,
564 	    sctp_clog.x.misc.log1,
565 	    sctp_clog.x.misc.log2,
566 	    sctp_clog.x.misc.log3,
567 	    sctp_clog.x.misc.log4);
568 #endif
569 }
570 
571 int
572 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
573 {
574 	/* May need to fix this if ktrdump does not work */
575 	return (0);
576 }
577 
578 #ifdef SCTP_AUDITING_ENABLED
579 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
580 static int sctp_audit_indx = 0;
581 
582 static
583 void
584 sctp_print_audit_report(void)
585 {
586 	int i;
587 	int cnt;
588 
589 	cnt = 0;
590 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
591 		if ((sctp_audit_data[i][0] == 0xe0) &&
592 		    (sctp_audit_data[i][1] == 0x01)) {
593 			cnt = 0;
594 			SCTP_PRINTF("\n");
595 		} else if (sctp_audit_data[i][0] == 0xf0) {
596 			cnt = 0;
597 			SCTP_PRINTF("\n");
598 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
599 		    (sctp_audit_data[i][1] == 0x01)) {
600 			SCTP_PRINTF("\n");
601 			cnt = 0;
602 		}
603 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
604 		    (uint32_t)sctp_audit_data[i][1]);
605 		cnt++;
606 		if ((cnt % 14) == 0)
607 			SCTP_PRINTF("\n");
608 	}
609 	for (i = 0; i < sctp_audit_indx; i++) {
610 		if ((sctp_audit_data[i][0] == 0xe0) &&
611 		    (sctp_audit_data[i][1] == 0x01)) {
612 			cnt = 0;
613 			SCTP_PRINTF("\n");
614 		} else if (sctp_audit_data[i][0] == 0xf0) {
615 			cnt = 0;
616 			SCTP_PRINTF("\n");
617 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
618 		    (sctp_audit_data[i][1] == 0x01)) {
619 			SCTP_PRINTF("\n");
620 			cnt = 0;
621 		}
622 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
623 		    (uint32_t)sctp_audit_data[i][1]);
624 		cnt++;
625 		if ((cnt % 14) == 0)
626 			SCTP_PRINTF("\n");
627 	}
628 	SCTP_PRINTF("\n");
629 }
630 
/*
 * Audit the consistency of the association's retransmission bookkeeping:
 * the sent-queue RESEND count, the total flight size/count, and each net's
 * flight_size are recomputed from the sent queue and compared against the
 * cached values.  Any mismatch is recorded in the audit buffer, printed,
 * and the cached value is corrected in place.  'net' is currently unused.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* 0xAA marks an audit entry; the low byte records the call site. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	/* 0xAF/0x01: bail, no endpoint to audit. */
	if (inp == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xAF/0x02: bail, no association to audit. */
	if (stcb == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: record the cached retransmission count before checking. */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/* Recompute RESEND count and in-flight totals from the sent queue. */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	/* 0xAF/0xA1: retran count mismatch — report and correct. */
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	/* 0xAF/0xA2: total flight size mismatch — report and correct. */
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	/* 0xAF/0xA5: flight chunk-count mismatch — report and correct. */
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Cross-check: sum of per-net flight sizes vs. association total. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	/* 0xAF/0xA3: per-net sums disagree — recompute each net's flight. */
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	/* Any correction made above triggers a full audit-buffer dump. */
	if (rep) {
		sctp_print_audit_report();
	}
}
760 
761 void
762 sctp_audit_log(uint8_t ev, uint8_t fd)
763 {
764 
765 	sctp_audit_data[sctp_audit_indx][0] = ev;
766 	sctp_audit_data[sctp_audit_indx][1] = fd;
767 	sctp_audit_indx++;
768 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
769 		sctp_audit_indx = 0;
770 	}
771 }
772 
773 #endif
774 
775 /*
776  * The conversion from time to ticks and vice versa is done by rounding
777  * upwards. This way we can test in the code the time to be positive and
778  * know that this corresponds to a positive number of ticks.
779  */
780 
781 uint32_t
782 sctp_msecs_to_ticks(uint32_t msecs)
783 {
784 	uint64_t temp;
785 	uint32_t ticks;
786 
787 	if (hz == 1000) {
788 		ticks = msecs;
789 	} else {
790 		temp = (((uint64_t)msecs * hz) + 999) / 1000;
791 		if (temp > UINT32_MAX) {
792 			ticks = UINT32_MAX;
793 		} else {
794 			ticks = (uint32_t)temp;
795 		}
796 	}
797 	return (ticks);
798 }
799 
800 uint32_t
801 sctp_ticks_to_msecs(uint32_t ticks)
802 {
803 	uint64_t temp;
804 	uint32_t msecs;
805 
806 	if (hz == 1000) {
807 		msecs = ticks;
808 	} else {
809 		temp = (((uint64_t)ticks * 1000) + (hz - 1)) / hz;
810 		if (temp > UINT32_MAX) {
811 			msecs = UINT32_MAX;
812 		} else {
813 			msecs = (uint32_t)temp;
814 		}
815 	}
816 	return (msecs);
817 }
818 
819 uint32_t
820 sctp_secs_to_ticks(uint32_t secs)
821 {
822 	uint64_t temp;
823 	uint32_t ticks;
824 
825 	temp = (uint64_t)secs * hz;
826 	if (temp > UINT32_MAX) {
827 		ticks = UINT32_MAX;
828 	} else {
829 		ticks = (uint32_t)temp;
830 	}
831 	return (ticks);
832 }
833 
834 uint32_t
835 sctp_ticks_to_secs(uint32_t ticks)
836 {
837 	uint64_t temp;
838 	uint32_t secs;
839 
840 	temp = ((uint64_t)ticks + (hz - 1)) / hz;
841 	if (temp > UINT32_MAX) {
842 		secs = UINT32_MAX;
843 	} else {
844 		secs = (uint32_t)temp;
845 	}
846 	return (secs);
847 }
848 
849 /*
850  * sctp_stop_timers_for_shutdown() should be called
851  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
852  * state to make sure that all timers are stopped.
853  */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;

	/* Association-wide timers first... */
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
	/* ...then the per-destination PMTU and heartbeat timers. */
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
	}
}
877 
/*
 * Stop every timer associated with this association — association-wide
 * timers first, then all per-destination timers.  The ASOCKILL timer is
 * only stopped when the caller asks for it (it may be the timer that is
 * tearing the association down).
 */
void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
	if (stop_assoc_kill_timer) {
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
	}
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
	/* Mobility adaptation */
	sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
	/* Per-destination timers. */
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
		sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
	}
}
919 
920 /*
921  * A list of sizes based on typical mtu's, used only if next hop size not
922  * returned. These values MUST be multiples of 4 and MUST be ordered.
923  */
static uint32_t sctp_mtu_sizes[] = {
	68,			/* minimum IPv4 MTU */
	296,
	508,
	512,
	544,
	576,			/* presumably the IPv4 minimum-reassembly size — confirm */
	1004,
	1492,			/* presumably PPPoE over Ethernet — confirm */
	1500,			/* presumably standard Ethernet — confirm */
	1536,
	2000,
	2048,
	4352,
	4464,
	8168,
	17912,
	32000,
	65532
};
944 
945 /*
946  * Return the largest MTU in sctp_mtu_sizes smaller than val.
947  * If val is smaller than the minimum, just return the largest
948  * multiple of 4 smaller or equal to val.
949  * Ensure that the result is a multiple of 4.
950  */
951 uint32_t
952 sctp_get_prev_mtu(uint32_t val)
953 {
954 	uint32_t i;
955 
956 	val &= 0xfffffffc;
957 	if (val <= sctp_mtu_sizes[0]) {
958 		return (val);
959 	}
960 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
961 		if (val <= sctp_mtu_sizes[i]) {
962 			break;
963 		}
964 	}
965 	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
966 	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
967 	return (sctp_mtu_sizes[i - 1]);
968 }
969 
970 /*
971  * Return the smallest MTU in sctp_mtu_sizes larger than val.
972  * If val is larger than the maximum, just return the largest multiple of 4 smaller
973  * or equal to val.
974  * Ensure that the result is a multiple of 4.
975  */
976 uint32_t
977 sctp_get_next_mtu(uint32_t val)
978 {
979 	/* select another MTU that is just bigger than this one */
980 	uint32_t i;
981 
982 	val &= 0xfffffffc;
983 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
984 		if (val < sctp_mtu_sizes[i]) {
985 			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
986 			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
987 			return (sctp_mtu_sizes[i]);
988 		}
989 	}
990 	return (val);
991 }
992 
/*
 * Refill m->random_store by hashing the endpoint's random seed together
 * with a monotonically increasing counter, then reset the read offset.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	/* Bump the counter so the next refill produces different output. */
	m->random_counter++;
}
1011 
1012 uint32_t
1013 sctp_select_initial_TSN(struct sctp_pcb *inp)
1014 {
1015 	/*
1016 	 * A true implementation should use random selection process to get
1017 	 * the initial stream sequence number, using RFC1750 as a good
1018 	 * guideline
1019 	 */
1020 	uint32_t x, *xp;
1021 	uint8_t *p;
1022 	int store_at, new_store;
1023 
1024 	if (inp->initial_sequence_debug != 0) {
1025 		uint32_t ret;
1026 
1027 		ret = inp->initial_sequence_debug;
1028 		inp->initial_sequence_debug++;
1029 		return (ret);
1030 	}
1031 retry:
1032 	store_at = inp->store_at;
1033 	new_store = store_at + sizeof(uint32_t);
1034 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
1035 		new_store = 0;
1036 	}
1037 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
1038 		goto retry;
1039 	}
1040 	if (new_store == 0) {
1041 		/* Refill the random store */
1042 		sctp_fill_random_store(inp);
1043 	}
1044 	p = &inp->random_store[store_at];
1045 	xp = (uint32_t *)p;
1046 	x = *xp;
1047 	return (x);
1048 }
1049 
1050 uint32_t
1051 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
1052 {
1053 	uint32_t x;
1054 	struct timeval now;
1055 
1056 	if (check) {
1057 		(void)SCTP_GETTIME_TIMEVAL(&now);
1058 	}
1059 	for (;;) {
1060 		x = sctp_select_initial_TSN(&inp->sctp_ep);
1061 		if (x == 0) {
1062 			/* we never use 0 */
1063 			continue;
1064 		}
1065 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
1066 			break;
1067 		}
1068 	}
1069 	return (x);
1070 }
1071 
1072 int32_t
1073 sctp_map_assoc_state(int kernel_state)
1074 {
1075 	int32_t user_state;
1076 
1077 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
1078 		user_state = SCTP_CLOSED;
1079 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
1080 		user_state = SCTP_SHUTDOWN_PENDING;
1081 	} else {
1082 		switch (kernel_state & SCTP_STATE_MASK) {
1083 		case SCTP_STATE_EMPTY:
1084 			user_state = SCTP_CLOSED;
1085 			break;
1086 		case SCTP_STATE_INUSE:
1087 			user_state = SCTP_CLOSED;
1088 			break;
1089 		case SCTP_STATE_COOKIE_WAIT:
1090 			user_state = SCTP_COOKIE_WAIT;
1091 			break;
1092 		case SCTP_STATE_COOKIE_ECHOED:
1093 			user_state = SCTP_COOKIE_ECHOED;
1094 			break;
1095 		case SCTP_STATE_OPEN:
1096 			user_state = SCTP_ESTABLISHED;
1097 			break;
1098 		case SCTP_STATE_SHUTDOWN_SENT:
1099 			user_state = SCTP_SHUTDOWN_SENT;
1100 			break;
1101 		case SCTP_STATE_SHUTDOWN_RECEIVED:
1102 			user_state = SCTP_SHUTDOWN_RECEIVED;
1103 			break;
1104 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
1105 			user_state = SCTP_SHUTDOWN_ACK_SENT;
1106 			break;
1107 		default:
1108 			user_state = SCTP_CLOSED;
1109 			break;
1110 		}
1111 	}
1112 	return (user_state);
1113 }
1114 
1115 int
1116 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1117     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
1118 {
1119 	struct sctp_association *asoc;
1120 
1121 	/*
1122 	 * Anything set to zero is taken care of by the allocation routine's
1123 	 * bzero
1124 	 */
1125 
1126 	/*
1127 	 * Up front select what scoping to apply on addresses I tell my peer
1128 	 * Not sure what to do with these right now, we will need to come up
1129 	 * with a way to set them. We may need to pass them through from the
1130 	 * caller in the sctp_aloc_assoc() function.
1131 	 */
1132 	int i;
1133 #if defined(SCTP_DETAILED_STR_STATS)
1134 	int j;
1135 #endif
1136 
1137 	asoc = &stcb->asoc;
1138 	/* init all variables to a known value. */
1139 	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
1140 	asoc->max_burst = inp->sctp_ep.max_burst;
1141 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
1142 	asoc->heart_beat_delay = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
1143 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
1144 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
1145 	asoc->ecn_supported = inp->ecn_supported;
1146 	asoc->prsctp_supported = inp->prsctp_supported;
1147 	asoc->idata_supported = inp->idata_supported;
1148 	asoc->auth_supported = inp->auth_supported;
1149 	asoc->asconf_supported = inp->asconf_supported;
1150 	asoc->reconfig_supported = inp->reconfig_supported;
1151 	asoc->nrsack_supported = inp->nrsack_supported;
1152 	asoc->pktdrop_supported = inp->pktdrop_supported;
1153 	asoc->idata_supported = inp->idata_supported;
1154 	asoc->sctp_cmt_pf = (uint8_t)0;
1155 	asoc->sctp_frag_point = inp->sctp_frag_point;
1156 	asoc->sctp_features = inp->sctp_features;
1157 	asoc->default_dscp = inp->sctp_ep.default_dscp;
1158 	asoc->max_cwnd = inp->max_cwnd;
1159 #ifdef INET6
1160 	if (inp->sctp_ep.default_flowlabel) {
1161 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
1162 	} else {
1163 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
1164 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
1165 			asoc->default_flowlabel &= 0x000fffff;
1166 			asoc->default_flowlabel |= 0x80000000;
1167 		} else {
1168 			asoc->default_flowlabel = 0;
1169 		}
1170 	}
1171 #endif
1172 	asoc->sb_send_resv = 0;
1173 	if (override_tag) {
1174 		asoc->my_vtag = override_tag;
1175 	} else {
1176 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1177 	}
1178 	/* Get the nonce tags */
1179 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1180 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1181 	asoc->vrf_id = vrf_id;
1182 
1183 #ifdef SCTP_ASOCLOG_OF_TSNS
1184 	asoc->tsn_in_at = 0;
1185 	asoc->tsn_out_at = 0;
1186 	asoc->tsn_in_wrapped = 0;
1187 	asoc->tsn_out_wrapped = 0;
1188 	asoc->cumack_log_at = 0;
1189 	asoc->cumack_log_atsnt = 0;
1190 #endif
1191 #ifdef SCTP_FS_SPEC_LOG
1192 	asoc->fs_index = 0;
1193 #endif
1194 	asoc->refcnt = 0;
1195 	asoc->assoc_up_sent = 0;
1196 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1197 	    sctp_select_initial_TSN(&inp->sctp_ep);
1198 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1199 	/* we are optimisitic here */
1200 	asoc->peer_supports_nat = 0;
1201 	asoc->sent_queue_retran_cnt = 0;
1202 
1203 	/* for CMT */
1204 	asoc->last_net_cmt_send_started = NULL;
1205 
1206 	/* This will need to be adjusted */
1207 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1208 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1209 	asoc->asconf_seq_in = asoc->last_acked_seq;
1210 
1211 	/* here we are different, we hold the next one we expect */
1212 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1213 
1214 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1215 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1216 
1217 	asoc->default_mtu = inp->sctp_ep.default_mtu;
1218 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1219 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1220 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1221 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1222 	asoc->free_chunk_cnt = 0;
1223 
1224 	asoc->iam_blocking = 0;
1225 	asoc->context = inp->sctp_context;
1226 	asoc->local_strreset_support = inp->local_strreset_support;
1227 	asoc->def_send = inp->def_send;
1228 	asoc->delayed_ack = sctp_ticks_to_msecs(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1229 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1230 	asoc->pr_sctp_cnt = 0;
1231 	asoc->total_output_queue_size = 0;
1232 
1233 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1234 		asoc->scope.ipv6_addr_legal = 1;
1235 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1236 			asoc->scope.ipv4_addr_legal = 1;
1237 		} else {
1238 			asoc->scope.ipv4_addr_legal = 0;
1239 		}
1240 	} else {
1241 		asoc->scope.ipv6_addr_legal = 0;
1242 		asoc->scope.ipv4_addr_legal = 1;
1243 	}
1244 
1245 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1246 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1247 
1248 	asoc->smallest_mtu = inp->sctp_frag_point;
1249 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1250 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1251 
1252 	asoc->stream_locked_on = 0;
1253 	asoc->ecn_echo_cnt_onq = 0;
1254 	asoc->stream_locked = 0;
1255 
1256 	asoc->send_sack = 1;
1257 
1258 	LIST_INIT(&asoc->sctp_restricted_addrs);
1259 
1260 	TAILQ_INIT(&asoc->nets);
1261 	TAILQ_INIT(&asoc->pending_reply_queue);
1262 	TAILQ_INIT(&asoc->asconf_ack_sent);
1263 	/* Setup to fill the hb random cache at first HB */
1264 	asoc->hb_random_idx = 4;
1265 
1266 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1267 
1268 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1269 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1270 
1271 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1272 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1273 
1274 	/*
1275 	 * Now the stream parameters, here we allocate space for all streams
1276 	 * that we request by default.
1277 	 */
1278 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1279 	    o_strms;
1280 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1281 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1282 	    SCTP_M_STRMO);
1283 	if (asoc->strmout == NULL) {
1284 		/* big trouble no memory */
1285 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1286 		return (ENOMEM);
1287 	}
1288 	for (i = 0; i < asoc->streamoutcnt; i++) {
1289 		/*
1290 		 * inbound side must be set to 0xffff, also NOTE when we get
1291 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1292 		 * count (streamoutcnt) but first check if we sent to any of
1293 		 * the upper streams that were dropped (if some were). Those
1294 		 * that were dropped must be notified to the upper layer as
1295 		 * failed to send.
1296 		 */
1297 		asoc->strmout[i].next_mid_ordered = 0;
1298 		asoc->strmout[i].next_mid_unordered = 0;
1299 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1300 		asoc->strmout[i].chunks_on_queues = 0;
1301 #if defined(SCTP_DETAILED_STR_STATS)
1302 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1303 			asoc->strmout[i].abandoned_sent[j] = 0;
1304 			asoc->strmout[i].abandoned_unsent[j] = 0;
1305 		}
1306 #else
1307 		asoc->strmout[i].abandoned_sent[0] = 0;
1308 		asoc->strmout[i].abandoned_unsent[0] = 0;
1309 #endif
1310 		asoc->strmout[i].sid = i;
1311 		asoc->strmout[i].last_msg_incomplete = 0;
1312 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1313 		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1314 	}
1315 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1316 
1317 	/* Now the mapping array */
1318 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1319 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1320 	    SCTP_M_MAP);
1321 	if (asoc->mapping_array == NULL) {
1322 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1323 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1324 		return (ENOMEM);
1325 	}
1326 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1327 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1328 	    SCTP_M_MAP);
1329 	if (asoc->nr_mapping_array == NULL) {
1330 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1331 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1332 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1333 		return (ENOMEM);
1334 	}
1335 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1336 
1337 	/* Now the init of the other outqueues */
1338 	TAILQ_INIT(&asoc->free_chunks);
1339 	TAILQ_INIT(&asoc->control_send_queue);
1340 	TAILQ_INIT(&asoc->asconf_send_queue);
1341 	TAILQ_INIT(&asoc->send_queue);
1342 	TAILQ_INIT(&asoc->sent_queue);
1343 	TAILQ_INIT(&asoc->resetHead);
1344 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1345 	TAILQ_INIT(&asoc->asconf_queue);
1346 	/* authentication fields */
1347 	asoc->authinfo.random = NULL;
1348 	asoc->authinfo.active_keyid = 0;
1349 	asoc->authinfo.assoc_key = NULL;
1350 	asoc->authinfo.assoc_keyid = 0;
1351 	asoc->authinfo.recv_key = NULL;
1352 	asoc->authinfo.recv_keyid = 0;
1353 	LIST_INIT(&asoc->shared_keys);
1354 	asoc->marked_retrans = 0;
1355 	asoc->port = inp->sctp_ep.port;
1356 	asoc->timoinit = 0;
1357 	asoc->timodata = 0;
1358 	asoc->timosack = 0;
1359 	asoc->timoshutdown = 0;
1360 	asoc->timoheartbeat = 0;
1361 	asoc->timocookie = 0;
1362 	asoc->timoshutdownack = 0;
1363 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1364 	asoc->discontinuity_time = asoc->start_time;
1365 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1366 		asoc->abandoned_unsent[i] = 0;
1367 		asoc->abandoned_sent[i] = 0;
1368 	}
1369 	/*
1370 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1371 	 * freed later when the association is freed.
1372 	 */
1373 	return (0);
1374 }
1375 
1376 void
1377 sctp_print_mapping_array(struct sctp_association *asoc)
1378 {
1379 	unsigned int i, limit;
1380 
1381 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1382 	    asoc->mapping_array_size,
1383 	    asoc->mapping_array_base_tsn,
1384 	    asoc->cumulative_tsn,
1385 	    asoc->highest_tsn_inside_map,
1386 	    asoc->highest_tsn_inside_nr_map);
1387 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1388 		if (asoc->mapping_array[limit - 1] != 0) {
1389 			break;
1390 		}
1391 	}
1392 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1393 	for (i = 0; i < limit; i++) {
1394 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1395 	}
1396 	if (limit % 16)
1397 		SCTP_PRINTF("\n");
1398 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1399 		if (asoc->nr_mapping_array[limit - 1]) {
1400 			break;
1401 		}
1402 	}
1403 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1404 	for (i = 0; i < limit; i++) {
1405 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1406 	}
1407 	if (limit % 16)
1408 		SCTP_PRINTF("\n");
1409 }
1410 
1411 int
1412 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1413 {
1414 	/* mapping array needs to grow */
1415 	uint8_t *new_array1, *new_array2;
1416 	uint32_t new_size;
1417 
1418 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1419 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1420 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1421 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1422 		/* can't get more, forget it */
1423 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1424 		if (new_array1) {
1425 			SCTP_FREE(new_array1, SCTP_M_MAP);
1426 		}
1427 		if (new_array2) {
1428 			SCTP_FREE(new_array2, SCTP_M_MAP);
1429 		}
1430 		return (-1);
1431 	}
1432 	memset(new_array1, 0, new_size);
1433 	memset(new_array2, 0, new_size);
1434 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1435 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1436 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1437 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1438 	asoc->mapping_array = new_array1;
1439 	asoc->nr_mapping_array = new_array2;
1440 	asoc->mapping_array_size = new_size;
1441 	return (0);
1442 }
1443 
1444 
/*
 * Core of the iterator thread: walk endpoints (and their associations)
 * matching the iterator's pcb_flags/pcb_features/asoc_state filters and run
 * the supplied callbacks on each. Takes ownership of 'it' and frees it when
 * the walk completes. Runs under the network epoch, the INP-INFO read lock,
 * and the ITERATOR lock; both are periodically dropped and reacquired (see
 * the SCTP_ITERATOR_MAX_AT_ONCE pause below) to let other threads in.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	struct epoch_tracker et;
	struct sctp_inpcb *tinp;
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;

	NET_EPOCH_ENTER(et);
	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	/* Drop the reference the scheduler took on the starting inp. */
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		/*
		 * Common exit path: clear cur_it, release the locks, fire
		 * the at-end callback, and free the iterator itself.
		 */
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		NET_EPOCH_EXIT(et);
		return;
	}
select_a_new_ep:
	/* On the first pass it->inp is already read-locked from above. */
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		it->stcb = NULL;
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		/* Run the per-endpoint callback once per endpoint. */
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Callback asked to skip this endpoint, or it has no assocs. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Hold the stcb via its refcount and the inp via an
			 * extra reference across the lock release so
			 * neither can be freed while we are unlocked.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				/* Someone asked us to stop while we slept. */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Reacquire and drop the holds we took above. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		KASSERT(it->inp == it->stcb->sctp_ep,
		        ("%s: stcb %p does not belong to inp %p, but inp %p",
		         __func__, it->stcb, it->inp, it->stcb->sctp_ep));

		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	it->stcb = NULL;
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1603 
1604 void
1605 sctp_iterator_worker(void)
1606 {
1607 	struct sctp_iterator *it;
1608 
1609 	/* This function is called with the WQ lock in place */
1610 	sctp_it_ctl.iterator_running = 1;
1611 	while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
1612 		/* now lets work on this one */
1613 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1614 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1615 		CURVNET_SET(it->vn);
1616 		sctp_iterator_work(it);
1617 		CURVNET_RESTORE();
1618 		SCTP_IPI_ITERATOR_WQ_LOCK();
1619 		/* sa_ignore FREED_MEMORY */
1620 	}
1621 	sctp_it_ctl.iterator_running = 0;
1622 	return;
1623 }
1624 
1625 
1626 static void
1627 sctp_handle_addr_wq(void)
1628 {
1629 	/* deal with the ADDR wq from the rtsock calls */
1630 	struct sctp_laddr *wi, *nwi;
1631 	struct sctp_asconf_iterator *asc;
1632 
1633 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1634 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1635 	if (asc == NULL) {
1636 		/* Try later, no memory */
1637 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1638 		    (struct sctp_inpcb *)NULL,
1639 		    (struct sctp_tcb *)NULL,
1640 		    (struct sctp_nets *)NULL);
1641 		return;
1642 	}
1643 	LIST_INIT(&asc->list_of_work);
1644 	asc->cnt = 0;
1645 
1646 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1647 		LIST_REMOVE(wi, sctp_nxt_addr);
1648 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1649 		asc->cnt++;
1650 	}
1651 
1652 	if (asc->cnt == 0) {
1653 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1654 	} else {
1655 		int ret;
1656 
1657 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1658 		    sctp_asconf_iterator_stcb,
1659 		    NULL,	/* No ep end for boundall */
1660 		    SCTP_PCB_FLAGS_BOUNDALL,
1661 		    SCTP_PCB_ANY_FEATURES,
1662 		    SCTP_ASOC_ANY_STATE,
1663 		    (void *)asc, 0,
1664 		    sctp_asconf_iterator_end, NULL, 0);
1665 		if (ret) {
1666 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1667 			/*
1668 			 * Freeing if we are stopping or put back on the
1669 			 * addr_wq.
1670 			 */
1671 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1672 				sctp_asconf_iterator_end(asc, 0);
1673 			} else {
1674 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1675 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1676 				}
1677 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1678 			}
1679 		}
1680 	}
1681 }
1682 
1683 /*-
1684  * The following table shows which pointers for the inp, stcb, or net are
1685  * stored for each timer after it was started.
1686  *
1687  *|Name                         |Timer                        |inp |stcb|net |
1688  *|-----------------------------|-----------------------------|----|----|----|
1689  *|SCTP_TIMER_TYPE_SEND         |net->rxt_timer               |Yes |Yes |Yes |
1690  *|SCTP_TIMER_TYPE_INIT         |net->rxt_timer               |Yes |Yes |Yes |
1691  *|SCTP_TIMER_TYPE_RECV         |stcb->asoc.dack_timer        |Yes |Yes |No  |
1692  *|SCTP_TIMER_TYPE_SHUTDOWN     |net->rxt_timer               |Yes |Yes |Yes |
1693  *|SCTP_TIMER_TYPE_HEARTBEAT    |net->hb_timer                |Yes |Yes |Yes |
1694  *|SCTP_TIMER_TYPE_COOKIE       |net->rxt_timer               |Yes |Yes |Yes |
1695  *|SCTP_TIMER_TYPE_NEWCOOKIE    |inp->sctp_ep.signature_change|Yes |No  |No  |
1696  *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer              |Yes |Yes |Yes |
1697  *|SCTP_TIMER_TYPE_SHUTDOWNACK  |net->rxt_timer               |Yes |Yes |Yes |
1698  *|SCTP_TIMER_TYPE_ASCONF       |stcb->asoc.asconf_timer      |Yes |Yes |Yes |
1699  *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer  |Yes |Yes |No  |
1700  *|SCTP_TIMER_TYPE_AUTOCLOSE    |stcb->asoc.autoclose_timer   |Yes |Yes |No  |
1701  *|SCTP_TIMER_TYPE_STRRESET     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
1702  *|SCTP_TIMER_TYPE_INPKILL      |inp->sctp_ep.signature_change|Yes |No  |No  |
1703  *|SCTP_TIMER_TYPE_ASOCKILL     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
1704  *|SCTP_TIMER_TYPE_ADDR_WQ      |SCTP_BASE_INFO(addr_wq_timer)|No  |No  |No  |
1705  *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No  |
1706  */
1707 
1708 void
1709 sctp_timeout_handler(void *t)
1710 {
1711 	struct epoch_tracker et;
1712 	struct timeval tv;
1713 	struct sctp_inpcb *inp;
1714 	struct sctp_tcb *stcb;
1715 	struct sctp_nets *net;
1716 	struct sctp_timer *tmr;
1717 	struct mbuf *op_err;
1718 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1719 	struct socket *so;
1720 #endif
1721 	int did_output;
1722 	int type;
1723 	int i, secret;
1724 
1725 	tmr = (struct sctp_timer *)t;
1726 	inp = (struct sctp_inpcb *)tmr->ep;
1727 	stcb = (struct sctp_tcb *)tmr->tcb;
1728 	net = (struct sctp_nets *)tmr->net;
1729 	CURVNET_SET((struct vnet *)tmr->vnet);
1730 	did_output = 1;
1731 
1732 #ifdef SCTP_AUDITING_ENABLED
1733 	sctp_audit_log(0xF0, (uint8_t)tmr->type);
1734 	sctp_auditing(3, inp, stcb, net);
1735 #endif
1736 
1737 	/* sanity checks... */
1738 	KASSERT(tmr->self == tmr,
1739 	    ("sctp_timeout_handler: tmr->self corrupted"));
1740 	KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type),
1741 	    ("sctp_timeout_handler: invalid timer type %d", tmr->type));
1742 	type = tmr->type;
1743 	KASSERT(stcb == NULL || stcb->sctp_ep == inp,
1744 	    ("sctp_timeout_handler of type %d: inp = %p, stcb->sctp_ep %p",
1745 	    type, stcb, stcb->sctp_ep));
1746 	if (inp) {
1747 		SCTP_INP_INCR_REF(inp);
1748 	}
1749 	tmr->stopped_from = 0xa001;
1750 	if (stcb) {
1751 		atomic_add_int(&stcb->asoc.refcnt, 1);
1752 		if (stcb->asoc.state == 0) {
1753 			atomic_add_int(&stcb->asoc.refcnt, -1);
1754 			if (inp) {
1755 				SCTP_INP_DECR_REF(inp);
1756 			}
1757 			SCTPDBG(SCTP_DEBUG_TIMER2,
1758 			    "Timer type %d handler exiting due to CLOSED association.\n",
1759 			    type);
1760 			CURVNET_RESTORE();
1761 			return;
1762 		}
1763 	}
1764 	tmr->stopped_from = 0xa002;
1765 	SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type);
1766 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1767 		if (inp) {
1768 			SCTP_INP_DECR_REF(inp);
1769 		}
1770 		if (stcb) {
1771 			atomic_add_int(&stcb->asoc.refcnt, -1);
1772 		}
1773 		SCTPDBG(SCTP_DEBUG_TIMER2,
1774 		    "Timer type %d handler exiting due to not being active.\n",
1775 		    type);
1776 		CURVNET_RESTORE();
1777 		return;
1778 	}
1779 
1780 	tmr->stopped_from = 0xa003;
1781 	if (stcb) {
1782 		SCTP_TCB_LOCK(stcb);
1783 		atomic_add_int(&stcb->asoc.refcnt, -1);
1784 		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1785 		    ((stcb->asoc.state == 0) ||
1786 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1787 			SCTP_TCB_UNLOCK(stcb);
1788 			if (inp) {
1789 				SCTP_INP_DECR_REF(inp);
1790 			}
1791 			SCTPDBG(SCTP_DEBUG_TIMER2,
1792 			    "Timer type %d handler exiting due to CLOSED association.\n",
1793 			    type);
1794 			CURVNET_RESTORE();
1795 			return;
1796 		}
1797 	} else if (inp != NULL) {
1798 		SCTP_INP_WLOCK(inp);
1799 	} else {
1800 		SCTP_WQ_ADDR_LOCK();
1801 	}
1802 
1803 	/* Record in stopped_from which timeout occurred. */
1804 	tmr->stopped_from = type;
1805 	NET_EPOCH_ENTER(et);
1806 	/* mark as being serviced now */
1807 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1808 		/*
1809 		 * Callout has been rescheduled.
1810 		 */
1811 		goto get_out;
1812 	}
1813 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1814 		/*
1815 		 * Not active, so no action.
1816 		 */
1817 		goto get_out;
1818 	}
1819 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1820 
1821 	/* call the handler for the appropriate timer type */
1822 	switch (type) {
1823 	case SCTP_TIMER_TYPE_SEND:
1824 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1825 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1826 		    type, inp, stcb, net));
1827 		SCTP_STAT_INCR(sctps_timodata);
1828 		stcb->asoc.timodata++;
1829 		stcb->asoc.num_send_timers_up--;
1830 		if (stcb->asoc.num_send_timers_up < 0) {
1831 			stcb->asoc.num_send_timers_up = 0;
1832 		}
1833 		SCTP_TCB_LOCK_ASSERT(stcb);
1834 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1835 			/* no need to unlock on tcb its gone */
1836 
1837 			goto out_decr;
1838 		}
1839 		SCTP_TCB_LOCK_ASSERT(stcb);
1840 #ifdef SCTP_AUDITING_ENABLED
1841 		sctp_auditing(4, inp, stcb, net);
1842 #endif
1843 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1844 		if ((stcb->asoc.num_send_timers_up == 0) &&
1845 		    (stcb->asoc.sent_queue_cnt > 0)) {
1846 			struct sctp_tmit_chunk *chk;
1847 
1848 			/*
1849 			 * Safeguard. If there on some on the sent queue
1850 			 * somewhere but no timers running something is
1851 			 * wrong... so we start a timer on the first chunk
1852 			 * on the send queue on whatever net it is sent to.
1853 			 */
1854 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
1855 				if (chk->whoTo != NULL) {
1856 					break;
1857 				}
1858 			}
1859 			if (chk != NULL) {
1860 				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
1861 			}
1862 		}
1863 		break;
1864 	case SCTP_TIMER_TYPE_INIT:
1865 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1866 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1867 		    type, inp, stcb, net));
1868 		SCTP_STAT_INCR(sctps_timoinit);
1869 		stcb->asoc.timoinit++;
1870 		if (sctp_t1init_timer(inp, stcb, net)) {
1871 			/* no need to unlock on tcb its gone */
1872 			goto out_decr;
1873 		}
1874 		/* We do output but not here */
1875 		did_output = 0;
1876 		break;
1877 	case SCTP_TIMER_TYPE_RECV:
1878 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
1879 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1880 		    type, inp, stcb, net));
1881 		SCTP_STAT_INCR(sctps_timosack);
1882 		stcb->asoc.timosack++;
1883 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1884 #ifdef SCTP_AUDITING_ENABLED
1885 		sctp_auditing(4, inp, stcb, NULL);
1886 #endif
1887 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1888 		break;
1889 	case SCTP_TIMER_TYPE_SHUTDOWN:
1890 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1891 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1892 		    type, inp, stcb, net));
1893 		SCTP_STAT_INCR(sctps_timoshutdown);
1894 		stcb->asoc.timoshutdown++;
1895 		if (sctp_shutdown_timer(inp, stcb, net)) {
1896 			/* no need to unlock on tcb its gone */
1897 			goto out_decr;
1898 		}
1899 #ifdef SCTP_AUDITING_ENABLED
1900 		sctp_auditing(4, inp, stcb, net);
1901 #endif
1902 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1903 		break;
1904 	case SCTP_TIMER_TYPE_HEARTBEAT:
1905 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1906 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1907 		    type, inp, stcb, net));
1908 		SCTP_STAT_INCR(sctps_timoheartbeat);
1909 		stcb->asoc.timoheartbeat++;
1910 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1911 			/* no need to unlock on tcb its gone */
1912 			goto out_decr;
1913 		}
1914 #ifdef SCTP_AUDITING_ENABLED
1915 		sctp_auditing(4, inp, stcb, net);
1916 #endif
1917 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1918 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1919 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1920 		}
1921 		break;
1922 	case SCTP_TIMER_TYPE_COOKIE:
1923 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1924 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1925 		    type, inp, stcb, net));
1926 		SCTP_STAT_INCR(sctps_timocookie);
1927 		stcb->asoc.timocookie++;
1928 		if (sctp_cookie_timer(inp, stcb, net)) {
1929 			/* no need to unlock on tcb its gone */
1930 			goto out_decr;
1931 		}
1932 #ifdef SCTP_AUDITING_ENABLED
1933 		sctp_auditing(4, inp, stcb, net);
1934 #endif
1935 		/*
1936 		 * We consider T3 and Cookie timer pretty much the same with
1937 		 * respect to where from in chunk_output.
1938 		 */
1939 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1940 		break;
1941 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1942 		KASSERT(inp != NULL && stcb == NULL && net == NULL,
1943 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1944 		    type, inp, stcb, net));
1945 		SCTP_STAT_INCR(sctps_timosecret);
1946 		(void)SCTP_GETTIME_TIMEVAL(&tv);
1947 		inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1948 		inp->sctp_ep.last_secret_number =
1949 		    inp->sctp_ep.current_secret_number;
1950 		inp->sctp_ep.current_secret_number++;
1951 		if (inp->sctp_ep.current_secret_number >=
1952 		    SCTP_HOW_MANY_SECRETS) {
1953 			inp->sctp_ep.current_secret_number = 0;
1954 		}
1955 		secret = (int)inp->sctp_ep.current_secret_number;
1956 		for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1957 			inp->sctp_ep.secret_key[secret][i] =
1958 			    sctp_select_initial_TSN(&inp->sctp_ep);
1959 		}
1960 		sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);
1961 		did_output = 0;
1962 		break;
1963 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1964 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1965 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1966 		    type, inp, stcb, net));
1967 		SCTP_STAT_INCR(sctps_timopathmtu);
1968 		sctp_pathmtu_timer(inp, stcb, net);
1969 		did_output = 0;
1970 		break;
1971 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1972 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1973 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1974 		    type, inp, stcb, net));
1975 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1976 			/* no need to unlock on tcb its gone */
1977 			goto out_decr;
1978 		}
1979 		SCTP_STAT_INCR(sctps_timoshutdownack);
1980 		stcb->asoc.timoshutdownack++;
1981 #ifdef SCTP_AUDITING_ENABLED
1982 		sctp_auditing(4, inp, stcb, net);
1983 #endif
1984 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1985 		break;
1986 	case SCTP_TIMER_TYPE_ASCONF:
1987 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1988 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1989 		    type, inp, stcb, net));
1990 		SCTP_STAT_INCR(sctps_timoasconf);
1991 		if (sctp_asconf_timer(inp, stcb, net)) {
1992 			/* no need to unlock on tcb its gone */
1993 			goto out_decr;
1994 		}
1995 #ifdef SCTP_AUDITING_ENABLED
1996 		sctp_auditing(4, inp, stcb, net);
1997 #endif
1998 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1999 		break;
2000 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2001 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
2002 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2003 		    type, inp, stcb, net));
2004 		SCTP_STAT_INCR(sctps_timoshutdownguard);
2005 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
2006 		    "Shutdown guard timer expired");
2007 		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2008 		/* no need to unlock on tcb its gone */
2009 		goto out_decr;
2010 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2011 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
2012 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2013 		    type, inp, stcb, net));
2014 		SCTP_STAT_INCR(sctps_timoautoclose);
2015 		sctp_autoclose_timer(inp, stcb);
2016 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
2017 		did_output = 0;
2018 		break;
2019 	case SCTP_TIMER_TYPE_STRRESET:
2020 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
2021 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2022 		    type, inp, stcb, net));
2023 		SCTP_STAT_INCR(sctps_timostrmrst);
2024 		if (sctp_strreset_timer(inp, stcb)) {
2025 			/* no need to unlock on tcb its gone */
2026 			goto out_decr;
2027 		}
2028 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
2029 		break;
2030 	case SCTP_TIMER_TYPE_INPKILL:
2031 		KASSERT(inp != NULL && stcb == NULL && net == NULL,
2032 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2033 		    type, inp, stcb, net));
2034 		SCTP_STAT_INCR(sctps_timoinpkill);
2035 		/*
2036 		 * special case, take away our increment since WE are the
2037 		 * killer
2038 		 */
2039 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
2040 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
2041 		SCTP_INP_DECR_REF(inp);
2042 		SCTP_INP_WUNLOCK(inp);
2043 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
2044 		    SCTP_CALLED_FROM_INPKILL_TIMER);
2045 		inp = NULL;
2046 		goto out_no_decr;
2047 	case SCTP_TIMER_TYPE_ASOCKILL:
2048 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
2049 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2050 		    type, inp, stcb, net));
2051 		SCTP_STAT_INCR(sctps_timoassockill);
2052 		/* Can we free it yet? */
2053 		SCTP_INP_DECR_REF(inp);
2054 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
2055 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
2056 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2057 		so = SCTP_INP_SO(inp);
2058 		atomic_add_int(&stcb->asoc.refcnt, 1);
2059 		SCTP_TCB_UNLOCK(stcb);
2060 		SCTP_SOCKET_LOCK(so, 1);
2061 		SCTP_TCB_LOCK(stcb);
2062 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2063 #endif
2064 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
2065 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
2066 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2067 		SCTP_SOCKET_UNLOCK(so, 1);
2068 #endif
2069 		/*
2070 		 * free asoc, always unlocks (or destroy's) so prevent
2071 		 * duplicate unlock or unlock of a free mtx :-0
2072 		 */
2073 		stcb = NULL;
2074 		goto out_no_decr;
2075 	case SCTP_TIMER_TYPE_ADDR_WQ:
2076 		KASSERT(inp == NULL && stcb == NULL && net == NULL,
2077 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2078 		    type, inp, stcb, net));
2079 		sctp_handle_addr_wq();
2080 		break;
2081 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2082 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
2083 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2084 		    type, inp, stcb, net));
2085 		SCTP_STAT_INCR(sctps_timodelprim);
2086 		sctp_delete_prim_timer(inp, stcb);
2087 		break;
2088 	default:
2089 #ifdef INVARIANTS
2090 		panic("Unknown timer type %d", type);
2091 #else
2092 		goto get_out;
2093 #endif
2094 	}
2095 #ifdef SCTP_AUDITING_ENABLED
2096 	sctp_audit_log(0xF1, (uint8_t)type);
2097 	if (inp)
2098 		sctp_auditing(5, inp, stcb, net);
2099 #endif
2100 	if ((did_output) && stcb) {
2101 		/*
2102 		 * Now we need to clean up the control chunk chain if an
2103 		 * ECNE is on it. It must be marked as UNSENT again so next
2104 		 * call will continue to send it until such time that we get
2105 		 * a CWR, to remove it. It is, however, less likely that we
2106 		 * will find a ecn echo on the chain though.
2107 		 */
2108 		sctp_fix_ecn_echo(&stcb->asoc);
2109 	}
2110 get_out:
2111 	if (stcb) {
2112 		SCTP_TCB_UNLOCK(stcb);
2113 	} else if (inp != NULL) {
2114 		SCTP_INP_WUNLOCK(inp);
2115 	} else {
2116 		SCTP_WQ_ADDR_UNLOCK();
2117 	}
2118 
2119 out_decr:
2120 	if (inp) {
2121 		SCTP_INP_DECR_REF(inp);
2122 	}
2123 
2124 out_no_decr:
2125 	SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type);
2126 	CURVNET_RESTORE();
2127 	NET_EPOCH_EXIT(et);
2128 }
2129 
2130 /*-
2131  * The following table shows which parameters must be provided
2132  * when calling sctp_timer_start(). For parameters not being
2133  * provided, NULL must be used.
2134  *
2135  * |Name                         |inp |stcb|net |
2136  * |-----------------------------|----|----|----|
2137  * |SCTP_TIMER_TYPE_SEND         |Yes |Yes |Yes |
2138  * |SCTP_TIMER_TYPE_INIT         |Yes |Yes |Yes |
2139  * |SCTP_TIMER_TYPE_RECV         |Yes |Yes |No  |
2140  * |SCTP_TIMER_TYPE_SHUTDOWN     |Yes |Yes |Yes |
2141  * |SCTP_TIMER_TYPE_HEARTBEAT    |Yes |Yes |Yes |
2142  * |SCTP_TIMER_TYPE_COOKIE       |Yes |Yes |Yes |
2143  * |SCTP_TIMER_TYPE_NEWCOOKIE    |Yes |No  |No  |
2144  * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes |
2145  * |SCTP_TIMER_TYPE_SHUTDOWNACK  |Yes |Yes |Yes |
2146  * |SCTP_TIMER_TYPE_ASCONF       |Yes |Yes |Yes |
2147  * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No  |
2148  * |SCTP_TIMER_TYPE_AUTOCLOSE    |Yes |Yes |No  |
2149  * |SCTP_TIMER_TYPE_STRRESET     |Yes |Yes |Yes |
2150  * |SCTP_TIMER_TYPE_INPKILL      |Yes |No  |No  |
2151  * |SCTP_TIMER_TYPE_ASOCKILL     |Yes |Yes |No  |
2152  * |SCTP_TIMER_TYPE_ADDR_WQ      |No  |No  |No  |
2153  * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No  |
2154  *
2155  */
2156 
2157 void
2158 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2159     struct sctp_nets *net)
2160 {
2161 	struct sctp_timer *tmr;
2162 	uint32_t to_ticks;
2163 	uint32_t rndval, jitter;
2164 
2165 	KASSERT(stcb == NULL || stcb->sctp_ep == inp,
2166 	    ("sctp_timer_start of type %d: inp = %p, stcb->sctp_ep %p",
2167 	    t_type, stcb, stcb->sctp_ep));
2168 	tmr = NULL;
2169 	to_ticks = 0;
2170 	if (stcb != NULL) {
2171 		SCTP_TCB_LOCK_ASSERT(stcb);
2172 	} else if (inp != NULL) {
2173 		SCTP_INP_WLOCK_ASSERT(inp);
2174 	} else {
2175 		SCTP_WQ_ADDR_LOCK_ASSERT();
2176 	}
2177 	if (stcb != NULL) {
2178 		/*
2179 		 * Don't restart timer on association that's about to be
2180 		 * killed.
2181 		 */
2182 		if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) &&
2183 		    (t_type != SCTP_TIMER_TYPE_ASOCKILL)) {
2184 			SCTPDBG(SCTP_DEBUG_TIMER2,
2185 			    "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n",
2186 			    t_type, inp, stcb, net);
2187 			return;
2188 		}
2189 		/* Don't restart timer on net that's been removed. */
2190 		if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) {
2191 			SCTPDBG(SCTP_DEBUG_TIMER2,
2192 			    "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n",
2193 			    t_type, inp, stcb, net);
2194 			return;
2195 		}
2196 	}
2197 	switch (t_type) {
2198 	case SCTP_TIMER_TYPE_SEND:
2199 		/* Here we use the RTO timer. */
2200 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2201 #ifdef INVARIANTS
2202 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2203 			    t_type, inp, stcb, net);
2204 #else
2205 			return;
2206 #endif
2207 		}
2208 		tmr = &net->rxt_timer;
2209 		if (net->RTO == 0) {
2210 			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2211 		} else {
2212 			to_ticks = sctp_msecs_to_ticks(net->RTO);
2213 		}
2214 		break;
2215 	case SCTP_TIMER_TYPE_INIT:
2216 		/*
2217 		 * Here we use the INIT timer default usually about 1
2218 		 * second.
2219 		 */
2220 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2221 #ifdef INVARIANTS
2222 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2223 			    t_type, inp, stcb, net);
2224 #else
2225 			return;
2226 #endif
2227 		}
2228 		tmr = &net->rxt_timer;
2229 		if (net->RTO == 0) {
2230 			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2231 		} else {
2232 			to_ticks = sctp_msecs_to_ticks(net->RTO);
2233 		}
2234 		break;
2235 	case SCTP_TIMER_TYPE_RECV:
2236 		/*
2237 		 * Here we use the Delayed-Ack timer value from the inp,
2238 		 * ususually about 200ms.
2239 		 */
2240 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2241 #ifdef INVARIANTS
2242 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2243 			    t_type, inp, stcb, net);
2244 #else
2245 			return;
2246 #endif
2247 		}
2248 		tmr = &stcb->asoc.dack_timer;
2249 		to_ticks = sctp_msecs_to_ticks(stcb->asoc.delayed_ack);
2250 		break;
2251 	case SCTP_TIMER_TYPE_SHUTDOWN:
2252 		/* Here we use the RTO of the destination. */
2253 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2254 #ifdef INVARIANTS
2255 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2256 			    t_type, inp, stcb, net);
2257 #else
2258 			return;
2259 #endif
2260 		}
2261 		tmr = &net->rxt_timer;
2262 		if (net->RTO == 0) {
2263 			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2264 		} else {
2265 			to_ticks = sctp_msecs_to_ticks(net->RTO);
2266 		}
2267 		break;
2268 	case SCTP_TIMER_TYPE_HEARTBEAT:
2269 		/*
2270 		 * The net is used here so that we can add in the RTO. Even
2271 		 * though we use a different timer. We also add the HB timer
2272 		 * PLUS a random jitter.
2273 		 */
2274 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2275 #ifdef INVARIANTS
2276 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2277 			    t_type, inp, stcb, net);
2278 #else
2279 			return;
2280 #endif
2281 		}
2282 		if ((net->dest_state & SCTP_ADDR_NOHB) &&
2283 		    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2284 			SCTPDBG(SCTP_DEBUG_TIMER2,
2285 			    "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n",
2286 			    t_type, inp, stcb, net);
2287 			return;
2288 		}
2289 		tmr = &net->hb_timer;
2290 		if (net->RTO == 0) {
2291 			to_ticks = stcb->asoc.initial_rto;
2292 		} else {
2293 			to_ticks = net->RTO;
2294 		}
2295 		rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2296 		jitter = rndval % to_ticks;
2297 		if (jitter >= (to_ticks >> 1)) {
2298 			to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2299 		} else {
2300 			to_ticks = to_ticks - jitter;
2301 		}
2302 		if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2303 		    !(net->dest_state & SCTP_ADDR_PF)) {
2304 			to_ticks += net->heart_beat_delay;
2305 		}
2306 		/*
2307 		 * Now we must convert the to_ticks that are now in ms to
2308 		 * ticks.
2309 		 */
2310 		to_ticks = sctp_msecs_to_ticks(to_ticks);
2311 		break;
2312 	case SCTP_TIMER_TYPE_COOKIE:
2313 		/*
2314 		 * Here we can use the RTO timer from the network since one
2315 		 * RTT was complete. If a retransmission happened then we
2316 		 * will be using the RTO initial value.
2317 		 */
2318 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2319 #ifdef INVARIANTS
2320 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2321 			    t_type, inp, stcb, net);
2322 #else
2323 			return;
2324 #endif
2325 		}
2326 		tmr = &net->rxt_timer;
2327 		if (net->RTO == 0) {
2328 			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2329 		} else {
2330 			to_ticks = sctp_msecs_to_ticks(net->RTO);
2331 		}
2332 		break;
2333 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2334 		/*
2335 		 * Nothing needed but the endpoint here ususually about 60
2336 		 * minutes.
2337 		 */
2338 		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
2339 #ifdef INVARIANTS
2340 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2341 			    t_type, inp, stcb, net);
2342 #else
2343 			return;
2344 #endif
2345 		}
2346 		tmr = &inp->sctp_ep.signature_change;
2347 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2348 		break;
2349 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2350 		/*
2351 		 * Here we use the value found in the EP for PMTUD,
2352 		 * ususually about 10 minutes.
2353 		 */
2354 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2355 #ifdef INVARIANTS
2356 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2357 			    t_type, inp, stcb, net);
2358 #else
2359 			return;
2360 #endif
2361 		}
2362 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2363 			SCTPDBG(SCTP_DEBUG_TIMER2,
2364 			    "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n",
2365 			    t_type, inp, stcb, net);
2366 			return;
2367 		}
2368 		tmr = &net->pmtu_timer;
2369 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2370 		break;
2371 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2372 		/* Here we use the RTO of the destination. */
2373 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2374 #ifdef INVARIANTS
2375 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2376 			    t_type, inp, stcb, net);
2377 #else
2378 			return;
2379 #endif
2380 		}
2381 		tmr = &net->rxt_timer;
2382 		if (net->RTO == 0) {
2383 			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2384 		} else {
2385 			to_ticks = sctp_msecs_to_ticks(net->RTO);
2386 		}
2387 		break;
2388 	case SCTP_TIMER_TYPE_ASCONF:
2389 		/*
2390 		 * Here the timer comes from the stcb but its value is from
2391 		 * the net's RTO.
2392 		 */
2393 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2394 #ifdef INVARIANTS
2395 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2396 			    t_type, inp, stcb, net);
2397 #else
2398 			return;
2399 #endif
2400 		}
2401 		tmr = &stcb->asoc.asconf_timer;
2402 		if (net->RTO == 0) {
2403 			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2404 		} else {
2405 			to_ticks = sctp_msecs_to_ticks(net->RTO);
2406 		}
2407 		break;
2408 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2409 		/*
2410 		 * Here we use the endpoints shutdown guard timer usually
2411 		 * about 3 minutes.
2412 		 */
2413 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2414 #ifdef INVARIANTS
2415 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2416 			    t_type, inp, stcb, net);
2417 #else
2418 			return;
2419 #endif
2420 		}
2421 		tmr = &stcb->asoc.shut_guard_timer;
2422 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2423 			if (stcb->asoc.maxrto < UINT32_MAX / 5) {
2424 				to_ticks = sctp_msecs_to_ticks(5 * stcb->asoc.maxrto);
2425 			} else {
2426 				to_ticks = sctp_msecs_to_ticks(UINT32_MAX);
2427 			}
2428 		} else {
2429 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2430 		}
2431 		break;
2432 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2433 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2434 #ifdef INVARIANTS
2435 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2436 			    t_type, inp, stcb, net);
2437 #else
2438 			return;
2439 #endif
2440 		}
2441 		tmr = &stcb->asoc.autoclose_timer;
2442 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2443 		break;
2444 	case SCTP_TIMER_TYPE_STRRESET:
2445 		/*
2446 		 * Here the timer comes from the stcb but its value is from
2447 		 * the net's RTO.
2448 		 */
2449 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2450 #ifdef INVARIANTS
2451 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2452 			    t_type, inp, stcb, net);
2453 #else
2454 			return;
2455 #endif
2456 		}
2457 		tmr = &stcb->asoc.strreset_timer;
2458 		if (net->RTO == 0) {
2459 			to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2460 		} else {
2461 			to_ticks = sctp_msecs_to_ticks(net->RTO);
2462 		}
2463 		break;
2464 	case SCTP_TIMER_TYPE_INPKILL:
2465 		/*
2466 		 * The inp is setup to die. We re-use the signature_chage
2467 		 * timer since that has stopped and we are in the GONE
2468 		 * state.
2469 		 */
2470 		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
2471 #ifdef INVARIANTS
2472 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2473 			    t_type, inp, stcb, net);
2474 #else
2475 			return;
2476 #endif
2477 		}
2478 		tmr = &inp->sctp_ep.signature_change;
2479 		to_ticks = sctp_msecs_to_ticks(SCTP_INP_KILL_TIMEOUT);
2480 		break;
2481 	case SCTP_TIMER_TYPE_ASOCKILL:
2482 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2483 #ifdef INVARIANTS
2484 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2485 			    t_type, inp, stcb, net);
2486 #else
2487 			return;
2488 #endif
2489 		}
2490 		tmr = &stcb->asoc.strreset_timer;
2491 		to_ticks = sctp_msecs_to_ticks(SCTP_ASOC_KILL_TIMEOUT);
2492 		break;
2493 	case SCTP_TIMER_TYPE_ADDR_WQ:
2494 		if ((inp != NULL) || (stcb != NULL) || (net != NULL)) {
2495 #ifdef INVARIANTS
2496 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2497 			    t_type, inp, stcb, net);
2498 #else
2499 			return;
2500 #endif
2501 		}
2502 		/* Only 1 tick away :-) */
2503 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2504 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
2505 		break;
2506 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2507 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2508 #ifdef INVARIANTS
2509 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2510 			    t_type, inp, stcb, net);
2511 #else
2512 			return;
2513 #endif
2514 		}
2515 		tmr = &stcb->asoc.delete_prim_timer;
2516 		to_ticks = sctp_msecs_to_ticks(stcb->asoc.initial_rto);
2517 		break;
2518 	default:
2519 #ifdef INVARIANTS
2520 		panic("Unknown timer type %d", t_type);
2521 #else
2522 		return;
2523 #endif
2524 	}
2525 	KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type));
2526 	KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type));
2527 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2528 		/*
2529 		 * We do NOT allow you to have it already running. If it is,
2530 		 * we leave the current one up unchanged.
2531 		 */
2532 		SCTPDBG(SCTP_DEBUG_TIMER2,
2533 		    "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n",
2534 		    t_type, inp, stcb, net);
2535 		return;
2536 	}
2537 	/* At this point we can proceed. */
2538 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2539 		stcb->asoc.num_send_timers_up++;
2540 	}
2541 	tmr->stopped_from = 0;
2542 	tmr->type = t_type;
2543 	tmr->ep = (void *)inp;
2544 	tmr->tcb = (void *)stcb;
2545 	if (t_type == SCTP_TIMER_TYPE_STRRESET) {
2546 		tmr->net = NULL;
2547 	} else {
2548 		tmr->net = (void *)net;
2549 	}
2550 	tmr->self = (void *)tmr;
2551 	tmr->vnet = (void *)curvnet;
2552 	tmr->ticks = sctp_get_tick_count();
2553 	if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) {
2554 		SCTPDBG(SCTP_DEBUG_TIMER2,
2555 		    "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n",
2556 		    t_type, to_ticks, inp, stcb, net);
2557 	} else {
2558 		/*
2559 		 * This should not happen, since we checked for pending
2560 		 * above.
2561 		 */
2562 		SCTPDBG(SCTP_DEBUG_TIMER2,
2563 		    "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n",
2564 		    t_type, to_ticks, inp, stcb, net);
2565 	}
2566 	return;
2567 }
2568 
2569 /*-
2570  * The following table shows which parameters must be provided
2571  * when calling sctp_timer_stop(). For parameters not being
2572  * provided, NULL must be used.
2573  *
2574  * |Name                         |inp |stcb|net |
2575  * |-----------------------------|----|----|----|
2576  * |SCTP_TIMER_TYPE_SEND         |Yes |Yes |Yes |
2577  * |SCTP_TIMER_TYPE_INIT         |Yes |Yes |Yes |
2578  * |SCTP_TIMER_TYPE_RECV         |Yes |Yes |No  |
2579  * |SCTP_TIMER_TYPE_SHUTDOWN     |Yes |Yes |Yes |
2580  * |SCTP_TIMER_TYPE_HEARTBEAT    |Yes |Yes |Yes |
2581  * |SCTP_TIMER_TYPE_COOKIE       |Yes |Yes |Yes |
2582  * |SCTP_TIMER_TYPE_NEWCOOKIE    |Yes |No  |No  |
2583  * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes |
2584  * |SCTP_TIMER_TYPE_SHUTDOWNACK  |Yes |Yes |Yes |
2585  * |SCTP_TIMER_TYPE_ASCONF       |Yes |Yes |No  |
2586  * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No  |
2587  * |SCTP_TIMER_TYPE_AUTOCLOSE    |Yes |Yes |No  |
2588  * |SCTP_TIMER_TYPE_STRRESET     |Yes |Yes |No  |
2589  * |SCTP_TIMER_TYPE_INPKILL      |Yes |No  |No  |
2590  * |SCTP_TIMER_TYPE_ASOCKILL     |Yes |Yes |No  |
2591  * |SCTP_TIMER_TYPE_ADDR_WQ      |No  |No  |No  |
2592  * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No  |
2593  *
2594  */
2595 
2596 void
2597 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2598     struct sctp_nets *net, uint32_t from)
2599 {
2600 	struct sctp_timer *tmr;
2601 
2602 	KASSERT(stcb == NULL || stcb->sctp_ep == inp,
2603 	    ("sctp_timer_stop of type %d: inp = %p, stcb->sctp_ep %p",
2604 	    t_type, stcb, stcb->sctp_ep));
2605 	if (stcb != NULL) {
2606 		SCTP_TCB_LOCK_ASSERT(stcb);
2607 	} else if (inp != NULL) {
2608 		SCTP_INP_WLOCK_ASSERT(inp);
2609 	} else {
2610 		SCTP_WQ_ADDR_LOCK_ASSERT();
2611 	}
2612 	tmr = NULL;
2613 	switch (t_type) {
2614 	case SCTP_TIMER_TYPE_SEND:
2615 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2616 #ifdef INVARIANTS
2617 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2618 			    t_type, inp, stcb, net);
2619 #else
2620 			return;
2621 #endif
2622 		}
2623 		tmr = &net->rxt_timer;
2624 		break;
2625 	case SCTP_TIMER_TYPE_INIT:
2626 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2627 #ifdef INVARIANTS
2628 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2629 			    t_type, inp, stcb, net);
2630 #else
2631 			return;
2632 #endif
2633 		}
2634 		tmr = &net->rxt_timer;
2635 		break;
2636 	case SCTP_TIMER_TYPE_RECV:
2637 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2638 #ifdef INVARIANTS
2639 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2640 			    t_type, inp, stcb, net);
2641 #else
2642 			return;
2643 #endif
2644 		}
2645 		tmr = &stcb->asoc.dack_timer;
2646 		break;
2647 	case SCTP_TIMER_TYPE_SHUTDOWN:
2648 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2649 #ifdef INVARIANTS
2650 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2651 			    t_type, inp, stcb, net);
2652 #else
2653 			return;
2654 #endif
2655 		}
2656 		tmr = &net->rxt_timer;
2657 		break;
2658 	case SCTP_TIMER_TYPE_HEARTBEAT:
2659 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2660 #ifdef INVARIANTS
2661 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2662 			    t_type, inp, stcb, net);
2663 #else
2664 			return;
2665 #endif
2666 		}
2667 		tmr = &net->hb_timer;
2668 		break;
2669 	case SCTP_TIMER_TYPE_COOKIE:
2670 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2671 #ifdef INVARIANTS
2672 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2673 			    t_type, inp, stcb, net);
2674 #else
2675 			return;
2676 #endif
2677 		}
2678 		tmr = &net->rxt_timer;
2679 		break;
2680 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2681 		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
2682 #ifdef INVARIANTS
2683 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2684 			    t_type, inp, stcb, net);
2685 #else
2686 			return;
2687 #endif
2688 		}
2689 		tmr = &inp->sctp_ep.signature_change;
2690 		break;
2691 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2692 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2693 #ifdef INVARIANTS
2694 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2695 			    t_type, inp, stcb, net);
2696 #else
2697 			return;
2698 #endif
2699 		}
2700 		tmr = &net->pmtu_timer;
2701 		break;
2702 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2703 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2704 #ifdef INVARIANTS
2705 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2706 			    t_type, inp, stcb, net);
2707 #else
2708 			return;
2709 #endif
2710 		}
2711 		tmr = &net->rxt_timer;
2712 		break;
2713 	case SCTP_TIMER_TYPE_ASCONF:
2714 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2715 #ifdef INVARIANTS
2716 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2717 			    t_type, inp, stcb, net);
2718 #else
2719 			return;
2720 #endif
2721 		}
2722 		tmr = &stcb->asoc.asconf_timer;
2723 		break;
2724 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2725 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2726 #ifdef INVARIANTS
2727 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2728 			    t_type, inp, stcb, net);
2729 #else
2730 			return;
2731 #endif
2732 		}
2733 		tmr = &stcb->asoc.shut_guard_timer;
2734 		break;
2735 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2736 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2737 #ifdef INVARIANTS
2738 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2739 			    t_type, inp, stcb, net);
2740 #else
2741 			return;
2742 #endif
2743 		}
2744 		tmr = &stcb->asoc.autoclose_timer;
2745 		break;
2746 	case SCTP_TIMER_TYPE_STRRESET:
2747 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2748 #ifdef INVARIANTS
2749 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2750 			    t_type, inp, stcb, net);
2751 #else
2752 			return;
2753 #endif
2754 		}
2755 		tmr = &stcb->asoc.strreset_timer;
2756 		break;
2757 	case SCTP_TIMER_TYPE_INPKILL:
2758 		/*
2759 		 * The inp is setup to die. We re-use the signature_chage
2760 		 * timer since that has stopped and we are in the GONE
2761 		 * state.
2762 		 */
2763 		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
2764 #ifdef INVARIANTS
2765 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2766 			    t_type, inp, stcb, net);
2767 #else
2768 			return;
2769 #endif
2770 		}
2771 		tmr = &inp->sctp_ep.signature_change;
2772 		break;
2773 	case SCTP_TIMER_TYPE_ASOCKILL:
2774 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2775 #ifdef INVARIANTS
2776 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2777 			    t_type, inp, stcb, net);
2778 #else
2779 			return;
2780 #endif
2781 		}
2782 		tmr = &stcb->asoc.strreset_timer;
2783 		break;
2784 	case SCTP_TIMER_TYPE_ADDR_WQ:
2785 		if ((inp != NULL) || (stcb != NULL) || (net != NULL)) {
2786 #ifdef INVARIANTS
2787 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2788 			    t_type, inp, stcb, net);
2789 #else
2790 			return;
2791 #endif
2792 		}
2793 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2794 		break;
2795 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2796 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2797 #ifdef INVARIANTS
2798 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2799 			    t_type, inp, stcb, net);
2800 #else
2801 			return;
2802 #endif
2803 		}
2804 		tmr = &stcb->asoc.delete_prim_timer;
2805 		break;
2806 	default:
2807 #ifdef INVARIANTS
2808 		panic("Unknown timer type %d", t_type);
2809 #else
2810 		return;
2811 #endif
2812 	}
2813 	KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type));
2814 	if ((tmr->type != SCTP_TIMER_TYPE_NONE) &&
2815 	    (tmr->type != t_type)) {
2816 		/*
2817 		 * Ok we have a timer that is under joint use. Cookie timer
2818 		 * per chance with the SEND timer. We therefore are NOT
2819 		 * running the timer that the caller wants stopped.  So just
2820 		 * return.
2821 		 */
2822 		SCTPDBG(SCTP_DEBUG_TIMER2,
2823 		    "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n",
2824 		    t_type, inp, stcb, net);
2825 		return;
2826 	}
2827 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2828 		stcb->asoc.num_send_timers_up--;
2829 		if (stcb->asoc.num_send_timers_up < 0) {
2830 			stcb->asoc.num_send_timers_up = 0;
2831 		}
2832 	}
2833 	tmr->self = NULL;
2834 	tmr->stopped_from = from;
2835 	if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) {
2836 		KASSERT(tmr->ep == inp,
2837 		    ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p",
2838 		    t_type, inp, tmr->ep));
2839 		KASSERT(tmr->tcb == stcb,
2840 		    ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p",
2841 		    t_type, stcb, tmr->tcb));
2842 		KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) ||
2843 		    ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)),
2844 		    ("sctp_timer_stop of type %d: net = %p, tmr->net = %p",
2845 		    t_type, net, tmr->net));
2846 		SCTPDBG(SCTP_DEBUG_TIMER2,
2847 		    "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n",
2848 		    t_type, inp, stcb, net);
2849 		tmr->ep = NULL;
2850 		tmr->tcb = NULL;
2851 		tmr->net = NULL;
2852 	} else {
2853 		SCTPDBG(SCTP_DEBUG_TIMER2,
2854 		    "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n",
2855 		    t_type, inp, stcb, net);
2856 	}
2857 	return;
2858 }
2859 
2860 uint32_t
2861 sctp_calculate_len(struct mbuf *m)
2862 {
2863 	uint32_t tlen = 0;
2864 	struct mbuf *at;
2865 
2866 	at = m;
2867 	while (at) {
2868 		tlen += SCTP_BUF_LEN(at);
2869 		at = SCTP_BUF_NEXT(at);
2870 	}
2871 	return (tlen);
2872 }
2873 
2874 void
2875 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2876     struct sctp_association *asoc, uint32_t mtu)
2877 {
2878 	/*
2879 	 * Reset the P-MTU size on this association, this involves changing
2880 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2881 	 * allow the DF flag to be cleared.
2882 	 */
2883 	struct sctp_tmit_chunk *chk;
2884 	unsigned int eff_mtu, ovh;
2885 
2886 	asoc->smallest_mtu = mtu;
2887 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2888 		ovh = SCTP_MIN_OVERHEAD;
2889 	} else {
2890 		ovh = SCTP_MIN_V4_OVERHEAD;
2891 	}
2892 	eff_mtu = mtu - ovh;
2893 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2894 		if (chk->send_size > eff_mtu) {
2895 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2896 		}
2897 	}
2898 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2899 		if (chk->send_size > eff_mtu) {
2900 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2901 		}
2902 	}
2903 }
2904 
2905 
2906 /*
2907  * Given an association and starting time of the current RTT period, update
2908  * RTO in number of msecs. net should point to the current network.
2909  * Return 1, if an RTO update was performed, return 0 if no update was
2910  * performed due to invalid starting point.
2911  */
2912 
2913 int
2914 sctp_calculate_rto(struct sctp_tcb *stcb,
2915     struct sctp_association *asoc,
2916     struct sctp_nets *net,
2917     struct timeval *old,
2918     int rtt_from_sack)
2919 {
2920 	struct timeval now;
2921 	uint64_t rtt_us;	/* RTT in us */
2922 	int32_t rtt;		/* RTT in ms */
2923 	uint32_t new_rto;
2924 	int first_measure = 0;
2925 
2926 	/************************/
2927 	/* 1. calculate new RTT */
2928 	/************************/
2929 	/* get the current time */
2930 	if (stcb->asoc.use_precise_time) {
2931 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2932 	} else {
2933 		(void)SCTP_GETTIME_TIMEVAL(&now);
2934 	}
2935 	if ((old->tv_sec > now.tv_sec) ||
2936 	    ((old->tv_sec == now.tv_sec) && (old->tv_sec > now.tv_sec))) {
2937 		/* The starting point is in the future. */
2938 		return (0);
2939 	}
2940 	timevalsub(&now, old);
2941 	rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec;
2942 	if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) {
2943 		/* The RTT is larger than a sane value. */
2944 		return (0);
2945 	}
2946 	/* store the current RTT in us */
2947 	net->rtt = rtt_us;
2948 	/* compute rtt in ms */
2949 	rtt = (int32_t)(net->rtt / 1000);
2950 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2951 		/*
2952 		 * Tell the CC module that a new update has just occurred
2953 		 * from a sack
2954 		 */
2955 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2956 	}
2957 	/*
2958 	 * Do we need to determine the lan? We do this only on sacks i.e.
2959 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2960 	 */
2961 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2962 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2963 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2964 			net->lan_type = SCTP_LAN_INTERNET;
2965 		} else {
2966 			net->lan_type = SCTP_LAN_LOCAL;
2967 		}
2968 	}
2969 
2970 	/***************************/
2971 	/* 2. update RTTVAR & SRTT */
2972 	/***************************/
2973 	/*-
2974 	 * Compute the scaled average lastsa and the
2975 	 * scaled variance lastsv as described in van Jacobson
2976 	 * Paper "Congestion Avoidance and Control", Annex A.
2977 	 *
2978 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2979 	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
2980 	 */
2981 	if (net->RTO_measured) {
2982 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2983 		net->lastsa += rtt;
2984 		if (rtt < 0) {
2985 			rtt = -rtt;
2986 		}
2987 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2988 		net->lastsv += rtt;
2989 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2990 			rto_logging(net, SCTP_LOG_RTTVAR);
2991 		}
2992 	} else {
2993 		/* First RTO measurment */
2994 		net->RTO_measured = 1;
2995 		first_measure = 1;
2996 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2997 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2998 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2999 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
3000 		}
3001 	}
3002 	if (net->lastsv == 0) {
3003 		net->lastsv = SCTP_CLOCK_GRANULARITY;
3004 	}
3005 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
3006 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
3007 	    (stcb->asoc.sat_network_lockout == 0)) {
3008 		stcb->asoc.sat_network = 1;
3009 	} else if ((!first_measure) && stcb->asoc.sat_network) {
3010 		stcb->asoc.sat_network = 0;
3011 		stcb->asoc.sat_network_lockout = 1;
3012 	}
3013 	/* bound it, per C6/C7 in Section 5.3.1 */
3014 	if (new_rto < stcb->asoc.minrto) {
3015 		new_rto = stcb->asoc.minrto;
3016 	}
3017 	if (new_rto > stcb->asoc.maxrto) {
3018 		new_rto = stcb->asoc.maxrto;
3019 	}
3020 	net->RTO = new_rto;
3021 	return (1);
3022 }
3023 
3024 /*
3025  * return a pointer to a contiguous piece of data from the given mbuf chain
3026  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
3027  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
 * is >= 'len'. Returns NULL if there aren't 'len' bytes in the chain.
3029  */
3030 caddr_t
3031 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
3032 {
3033 	uint32_t count;
3034 	uint8_t *ptr;
3035 
3036 	ptr = in_ptr;
3037 	if ((off < 0) || (len <= 0))
3038 		return (NULL);
3039 
3040 	/* find the desired start location */
3041 	while ((m != NULL) && (off > 0)) {
3042 		if (off < SCTP_BUF_LEN(m))
3043 			break;
3044 		off -= SCTP_BUF_LEN(m);
3045 		m = SCTP_BUF_NEXT(m);
3046 	}
3047 	if (m == NULL)
3048 		return (NULL);
3049 
3050 	/* is the current mbuf large enough (eg. contiguous)? */
3051 	if ((SCTP_BUF_LEN(m) - off) >= len) {
3052 		return (mtod(m, caddr_t)+off);
3053 	} else {
3054 		/* else, it spans more than one mbuf, so save a temp copy... */
3055 		while ((m != NULL) && (len > 0)) {
3056 			count = min(SCTP_BUF_LEN(m) - off, len);
3057 			memcpy(ptr, mtod(m, caddr_t)+off, count);
3058 			len -= count;
3059 			ptr += count;
3060 			off = 0;
3061 			m = SCTP_BUF_NEXT(m);
3062 		}
3063 		if ((m == NULL) && (len > 0))
3064 			return (NULL);
3065 		else
3066 			return ((caddr_t)in_ptr);
3067 	}
3068 }
3069 
3070 
3071 
3072 struct sctp_paramhdr *
3073 sctp_get_next_param(struct mbuf *m,
3074     int offset,
3075     struct sctp_paramhdr *pull,
3076     int pull_limit)
3077 {
3078 	/* This just provides a typed signature to Peter's Pull routine */
3079 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
3080 	    (uint8_t *)pull));
3081 }
3082 
3083 
3084 struct mbuf *
3085 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
3086 {
3087 	struct mbuf *m_last;
3088 	caddr_t dp;
3089 
3090 	if (padlen > 3) {
3091 		return (NULL);
3092 	}
3093 	if (padlen <= M_TRAILINGSPACE(m)) {
3094 		/*
3095 		 * The easy way. We hope the majority of the time we hit
3096 		 * here :)
3097 		 */
3098 		m_last = m;
3099 	} else {
3100 		/* Hard way we must grow the mbuf chain */
3101 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
3102 		if (m_last == NULL) {
3103 			return (NULL);
3104 		}
3105 		SCTP_BUF_LEN(m_last) = 0;
3106 		SCTP_BUF_NEXT(m_last) = NULL;
3107 		SCTP_BUF_NEXT(m) = m_last;
3108 	}
3109 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
3110 	SCTP_BUF_LEN(m_last) += padlen;
3111 	memset(dp, 0, padlen);
3112 	return (m_last);
3113 }
3114 
3115 struct mbuf *
3116 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
3117 {
3118 	/* find the last mbuf in chain and pad it */
3119 	struct mbuf *m_at;
3120 
3121 	if (last_mbuf != NULL) {
3122 		return (sctp_add_pad_tombuf(last_mbuf, padval));
3123 	} else {
3124 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3125 			if (SCTP_BUF_NEXT(m_at) == NULL) {
3126 				return (sctp_add_pad_tombuf(m_at, padval));
3127 			}
3128 		}
3129 	}
3130 	return (NULL);
3131 }
3132 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification (when the application enabled
 * SCTP_PCB_FLAGS_RECVASSOCEVNT) to the socket's read queue.  For COMM_UP
 * and RESTART the notification carries the negotiated feature list; for
 * COMM_LOST and CANT_STR_ASSOC it carries the (truncated) ABORT chunk.
 * For 1-to-1 style sockets, COMM_LOST/CANT_STR_ASSOC additionally set a
 * socket-level error and shut down reading before waking any sleepers.
 * Caller holds the TCB lock; 'so_locked' says whether the socket lock is
 * already held (relevant only on platforms using socket-level locking).
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t abort_len;
	unsigned int i;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	if (stcb == NULL) {
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
			/*
			 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
			 * contiguous.
			 */
			if (abort_len > SCTP_CHUNK_BUFFER_SIZE) {
				abort_len = SCTP_CHUNK_BUFFER_SIZE;
			}
		} else {
			abort_len = 0;
		}
		/* Size the trailing sac_info for features or the ABORT copy. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				/* Still deliver the socket error below. */
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			/* Room was allocated for sac_info; fill it in. */
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				if (stcb->asoc.idata_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			control->spec_flags = M_NOTIFICATION;
			/* not that we need this */
			control->tail_mbuf = m_notify;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up an error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			/* Peer aborted: refused during setup, reset after. */
			if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			/* Local abort: timed out during setup, aborted after. */
			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
		SOCK_UNLOCK(stcb->sctp_socket);
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		/* Hold a ref across the lock juggle so the TCB survives. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3293 
/*
 * Deliver an SCTP_PEER_ADDR_CHANGE notification for address 'sa' with the
 * given state and error code, when the application enabled
 * SCTP_PCB_FLAGS_RECVPADDREVNT.  The address is normalized for the user:
 * v4 addresses may be presented v4-mapped, and link-local v6 scope ids
 * are recovered or cleared as appropriate.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address into spc_aaddr in the form the user expects. */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			/* Socket wants v4 addresses as v4-mapped v6. */
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3385 
3386 
/*
 * Deliver an SCTP_SEND_FAILED / SCTP_SEND_FAILED_EVENT notification for a
 * data chunk that could not be delivered ('sent' says whether it was put
 * on the wire).  The chunk's user data is stolen from 'chk' (chk->data is
 * set to NULL) and handed up behind the notification header, after the
 * SCTP chunk header and padding are trimmed off.  Enabled by either the
 * RECVSENDFAILEVNT or RECVNSENDFAILEVNT feature; the newer event format
 * is used when RECVNSENDFAILEVNT is on.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	struct sctp_chunkhdr *chkhdr;
	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}

	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	/* I-DATA chunks carry a larger header than plain DATA chunks. */
	if (stcb->asoc.idata_supported) {
		chkhdr_len = sizeof(struct sctp_idata_chunk);
	} else {
		chkhdr_len = sizeof(struct sctp_data_chunk);
	}
	/* Use some defaults in case we can't access the chunk header */
	if (chk->send_size >= chkhdr_len) {
		payload_len = chk->send_size - chkhdr_len;
	} else {
		payload_len = 0;
	}
	padding_len = 0;
	if (chk->data != NULL) {
		/* Recompute payload/padding from the actual chunk header. */
		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
		if (chkhdr != NULL) {
			chk_len = ntohs(chkhdr->chunk_length);
			if ((chk_len >= chkhdr_len) &&
			    (chk->send_size >= chk_len) &&
			    (chk->send_size - chk_len < 4)) {
				padding_len = chk->send_size - chk_len;
				payload_len = chk->send_size - chkhdr_len - padding_len;
			}
		}
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		/* Newer sctp_send_failed_event format. */
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		/* Legacy sctp_send_failed format. */
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	if (chk->data != NULL) {
		/* Trim off the sctp chunk header (it should be there) */
		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
			m_adj(chk->data, chkhdr_len);
			m_adj(chk->data, -padding_len);
			sctp_mbuf_crush(chk->data);
			chk->send_size -= (chkhdr_len + padding_len);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3521 
3522 
/*
 * Deliver an SCTP_SEND_FAILED / SCTP_SEND_FAILED_EVENT notification for a
 * message still sitting on a stream queue (never made it into a chunk),
 * so it is always reported as SCTP_DATA_UNSENT.  The queued data is
 * stolen from 'sp' (sp->data is set to NULL) and handed up behind the
 * notification header.  Counterpart of sctp_notify_send_failed() for
 * stream-queue entries.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int notifhdr_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		/* Newer sctp_send_failed_event format. */
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->sid;
		if (sp->some_taken) {
			/* Part of the message was already chunked off. */
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		/* Legacy sctp_send_failed format. */
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->sid;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3622 
3623 
3624 
3625 static void
3626 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3627 {
3628 	struct mbuf *m_notify;
3629 	struct sctp_adaptation_event *sai;
3630 	struct sctp_queued_to_read *control;
3631 
3632 	if ((stcb == NULL) ||
3633 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3634 		/* event not enabled */
3635 		return;
3636 	}
3637 
3638 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3639 	if (m_notify == NULL)
3640 		/* no space left */
3641 		return;
3642 	SCTP_BUF_LEN(m_notify) = 0;
3643 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3644 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3645 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3646 	sai->sai_flags = 0;
3647 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3648 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3649 	sai->sai_assoc_id = sctp_get_associd(stcb);
3650 
3651 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3652 	SCTP_BUF_NEXT(m_notify) = NULL;
3653 
3654 	/* append to socket */
3655 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3656 	    0, 0, stcb->asoc.context, 0, 0, 0,
3657 	    m_notify);
3658 	if (control == NULL) {
3659 		/* no memory */
3660 		sctp_m_freem(m_notify);
3661 		return;
3662 	}
3663 	control->length = SCTP_BUF_LEN(m_notify);
3664 	control->spec_flags = M_NOTIFICATION;
3665 	/* not that we need this */
3666 	control->tail_mbuf = m_notify;
3667 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3668 	    control,
3669 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3670 }
3671 
/* This always must be called with the read-queue LOCKED in the INP */
/*
 * Deliver an SCTP_PARTIAL_DELIVERY_EVENT notification.  'val' packs the
 * stream id in its upper 16 bits and the sequence number in its lower 16.
 * Unlike the other notify routines, the read-queue entry is inserted
 * directly (after asoc.control_pdapi when set) rather than through
 * sctp_add_to_readq(), hence the read-queue lock requirement above.
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* Nobody can read the notification; don't queue it. */
		return;
	}

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* Unpack stream id (high 16 bits) and sequence (low 16 bits). */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	control->end_added = 1;
	/* Insert right after the PD-API entry being aborted, when known. */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* Hold a ref across the lock juggle. */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3765 
/*
 * Queue an SCTP_SHUTDOWN_EVENT notification on the socket receive buffer.
 * For one-to-one (TCP model) style sockets the socket is additionally
 * marked as unable to send, so the application observes the half-close.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		/*
		 * Lock-order dance: hold a reference on the association
		 * while the TCB lock is dropped so the socket lock can be
		 * acquired first, then re-take the TCB lock.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* the socket was closed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* note: tail_mbuf is redundant for a single-mbuf chain, set anyway */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3835 
3836 static void
3837 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3838     int so_locked
3839 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3840     SCTP_UNUSED
3841 #endif
3842 )
3843 {
3844 	struct mbuf *m_notify;
3845 	struct sctp_sender_dry_event *event;
3846 	struct sctp_queued_to_read *control;
3847 
3848 	if ((stcb == NULL) ||
3849 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3850 		/* event not enabled */
3851 		return;
3852 	}
3853 
3854 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3855 	if (m_notify == NULL) {
3856 		/* no space left */
3857 		return;
3858 	}
3859 	SCTP_BUF_LEN(m_notify) = 0;
3860 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3861 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3862 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3863 	event->sender_dry_flags = 0;
3864 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3865 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3866 
3867 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3868 	SCTP_BUF_NEXT(m_notify) = NULL;
3869 
3870 	/* append to socket */
3871 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3872 	    0, 0, stcb->asoc.context, 0, 0, 0,
3873 	    m_notify);
3874 	if (control == NULL) {
3875 		/* no memory */
3876 		sctp_m_freem(m_notify);
3877 		return;
3878 	}
3879 	control->length = SCTP_BUF_LEN(m_notify);
3880 	control->spec_flags = M_NOTIFICATION;
3881 	/* not that we need this */
3882 	control->tail_mbuf = m_notify;
3883 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3884 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3885 }
3886 
3887 
3888 void
3889 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3890 {
3891 	struct mbuf *m_notify;
3892 	struct sctp_queued_to_read *control;
3893 	struct sctp_stream_change_event *stradd;
3894 
3895 	if ((stcb == NULL) ||
3896 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3897 		/* event not enabled */
3898 		return;
3899 	}
3900 	if ((stcb->asoc.peer_req_out) && flag) {
3901 		/* Peer made the request, don't tell the local user */
3902 		stcb->asoc.peer_req_out = 0;
3903 		return;
3904 	}
3905 	stcb->asoc.peer_req_out = 0;
3906 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3907 	if (m_notify == NULL)
3908 		/* no space left */
3909 		return;
3910 	SCTP_BUF_LEN(m_notify) = 0;
3911 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3912 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3913 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3914 	stradd->strchange_flags = flag;
3915 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3916 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3917 	stradd->strchange_instrms = numberin;
3918 	stradd->strchange_outstrms = numberout;
3919 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3920 	SCTP_BUF_NEXT(m_notify) = NULL;
3921 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3922 		/* no space */
3923 		sctp_m_freem(m_notify);
3924 		return;
3925 	}
3926 	/* append to socket */
3927 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3928 	    0, 0, stcb->asoc.context, 0, 0, 0,
3929 	    m_notify);
3930 	if (control == NULL) {
3931 		/* no memory */
3932 		sctp_m_freem(m_notify);
3933 		return;
3934 	}
3935 	control->length = SCTP_BUF_LEN(m_notify);
3936 	control->spec_flags = M_NOTIFICATION;
3937 	/* not that we need this */
3938 	control->tail_mbuf = m_notify;
3939 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3940 	    control,
3941 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3942 }
3943 
3944 void
3945 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3946 {
3947 	struct mbuf *m_notify;
3948 	struct sctp_queued_to_read *control;
3949 	struct sctp_assoc_reset_event *strasoc;
3950 
3951 	if ((stcb == NULL) ||
3952 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3953 		/* event not enabled */
3954 		return;
3955 	}
3956 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3957 	if (m_notify == NULL)
3958 		/* no space left */
3959 		return;
3960 	SCTP_BUF_LEN(m_notify) = 0;
3961 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3962 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3963 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3964 	strasoc->assocreset_flags = flag;
3965 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3966 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3967 	strasoc->assocreset_local_tsn = sending_tsn;
3968 	strasoc->assocreset_remote_tsn = recv_tsn;
3969 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3970 	SCTP_BUF_NEXT(m_notify) = NULL;
3971 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3972 		/* no space */
3973 		sctp_m_freem(m_notify);
3974 		return;
3975 	}
3976 	/* append to socket */
3977 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3978 	    0, 0, stcb->asoc.context, 0, 0, 0,
3979 	    m_notify);
3980 	if (control == NULL) {
3981 		/* no memory */
3982 		sctp_m_freem(m_notify);
3983 		return;
3984 	}
3985 	control->length = SCTP_BUF_LEN(m_notify);
3986 	control->spec_flags = M_NOTIFICATION;
3987 	/* not that we need this */
3988 	control->tail_mbuf = m_notify;
3989 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3990 	    control,
3991 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3992 }
3993 
3994 
3995 
3996 static void
3997 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3998     int number_entries, uint16_t *list, int flag)
3999 {
4000 	struct mbuf *m_notify;
4001 	struct sctp_queued_to_read *control;
4002 	struct sctp_stream_reset_event *strreset;
4003 	int len;
4004 
4005 	if ((stcb == NULL) ||
4006 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
4007 		/* event not enabled */
4008 		return;
4009 	}
4010 
4011 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
4012 	if (m_notify == NULL)
4013 		/* no space left */
4014 		return;
4015 	SCTP_BUF_LEN(m_notify) = 0;
4016 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
4017 	if (len > M_TRAILINGSPACE(m_notify)) {
4018 		/* never enough room */
4019 		sctp_m_freem(m_notify);
4020 		return;
4021 	}
4022 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
4023 	memset(strreset, 0, len);
4024 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
4025 	strreset->strreset_flags = flag;
4026 	strreset->strreset_length = len;
4027 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
4028 	if (number_entries) {
4029 		int i;
4030 
4031 		for (i = 0; i < number_entries; i++) {
4032 			strreset->strreset_stream_list[i] = ntohs(list[i]);
4033 		}
4034 	}
4035 	SCTP_BUF_LEN(m_notify) = len;
4036 	SCTP_BUF_NEXT(m_notify) = NULL;
4037 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
4038 		/* no space */
4039 		sctp_m_freem(m_notify);
4040 		return;
4041 	}
4042 	/* append to socket */
4043 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
4044 	    0, 0, stcb->asoc.context, 0, 0, 0,
4045 	    m_notify);
4046 	if (control == NULL) {
4047 		/* no memory */
4048 		sctp_m_freem(m_notify);
4049 		return;
4050 	}
4051 	control->length = SCTP_BUF_LEN(m_notify);
4052 	control->spec_flags = M_NOTIFICATION;
4053 	/* not that we need this */
4054 	control->tail_mbuf = m_notify;
4055 	sctp_add_to_readq(stcb->sctp_ep, stcb,
4056 	    control,
4057 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
4058 }
4059 
4060 
4061 static void
4062 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
4063 {
4064 	struct mbuf *m_notify;
4065 	struct sctp_remote_error *sre;
4066 	struct sctp_queued_to_read *control;
4067 	unsigned int notif_len;
4068 	uint16_t chunk_len;
4069 
4070 	if ((stcb == NULL) ||
4071 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
4072 		return;
4073 	}
4074 	if (chunk != NULL) {
4075 		chunk_len = ntohs(chunk->ch.chunk_length);
4076 		/*
4077 		 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
4078 		 * contiguous.
4079 		 */
4080 		if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) {
4081 			chunk_len = SCTP_CHUNK_BUFFER_SIZE;
4082 		}
4083 	} else {
4084 		chunk_len = 0;
4085 	}
4086 	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
4087 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
4088 	if (m_notify == NULL) {
4089 		/* Retry with smaller value. */
4090 		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
4091 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
4092 		if (m_notify == NULL) {
4093 			return;
4094 		}
4095 	}
4096 	SCTP_BUF_NEXT(m_notify) = NULL;
4097 	sre = mtod(m_notify, struct sctp_remote_error *);
4098 	memset(sre, 0, notif_len);
4099 	sre->sre_type = SCTP_REMOTE_ERROR;
4100 	sre->sre_flags = 0;
4101 	sre->sre_length = sizeof(struct sctp_remote_error);
4102 	sre->sre_error = error;
4103 	sre->sre_assoc_id = sctp_get_associd(stcb);
4104 	if (notif_len > sizeof(struct sctp_remote_error)) {
4105 		memcpy(sre->sre_data, chunk, chunk_len);
4106 		sre->sre_length += chunk_len;
4107 	}
4108 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
4109 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
4110 	    0, 0, stcb->asoc.context, 0, 0, 0,
4111 	    m_notify);
4112 	if (control != NULL) {
4113 		control->length = SCTP_BUF_LEN(m_notify);
4114 		control->spec_flags = M_NOTIFICATION;
4115 		/* not that we need this */
4116 		control->tail_mbuf = m_notify;
4117 		sctp_add_to_readq(stcb->sctp_ep, stcb,
4118 		    control,
4119 		    &stcb->sctp_socket->so_rcv, 1,
4120 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
4121 	} else {
4122 		sctp_m_freem(m_notify);
4123 	}
4124 }
4125 
4126 
/*
 * Central notification dispatcher: translate an SCTP_NOTIFY_* code into
 * the corresponding socket-API event and queue it for the application.
 * 'data' is interpreted per notification type (a net, a chunk, a stream
 * list, ...); notifications are suppressed when the socket is gone or
 * can no longer receive.
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		/* receive side already shut down; nothing to deliver to */
		return;
	}
	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* COMM_UP is sent at most once per association */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.auth_supported == 0) {
			/* recurse to report the lack of peer AUTH support */
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;

			val = *((uint32_t *)data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		/* in front states report CANT_STR_ASSOC instead of COMM_LOST */
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.auth_supported == 0) {
			/* recurse to report the lack of peer AUTH support */
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_STR_RESET_SEND:
		/* for stream resets, 'error' carries the entry count */
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		/* 'data' carries the key number, not a pointer */
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __func__, notification, notification);
		break;
	}			/* end switch */
}
4306 
/*
 * Purge every chunk queued for transmission on this association (the
 * sent queue, the send queue, and each stream's output queue), notifying
 * the ULP of every failed datagram with the given error code.
 * 'holds_lock' indicates whether the caller already holds the TCB send
 * lock.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* re-check: the notify path may have taken the mbuf */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* re-check: the notify path may have taken the mbuf */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				/* re-check after the notify */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
4418 
4419 void
4420 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
4421     struct sctp_abort_chunk *abort, int so_locked
4422 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4423     SCTP_UNUSED
4424 #endif
4425 )
4426 {
4427 	if (stcb == NULL) {
4428 		return;
4429 	}
4430 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
4431 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4432 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
4433 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
4434 	}
4435 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4436 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4437 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
4438 		return;
4439 	}
4440 	/* Tell them we lost the asoc */
4441 	sctp_report_all_outbound(stcb, error, 0, so_locked);
4442 	if (from_peer) {
4443 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
4444 	} else {
4445 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
4446 	}
4447 }
4448 
/*
 * Send an ABORT for the association identified by the incoming packet
 * and, when a TCB exists, notify the ULP and free the association.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	vtag = 0;
	if (stcb != NULL) {
		/* we know the association: use its peer vtag and VRF */
		vtag = stcb->asoc.peer_vtag;
		vrf_id = stcb->asoc.vrf_id;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    mflowtype, mflowid, inp->fibnum,
	    vrf_id, port);
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: hold a reference while dropping the
		 * TCB lock so the socket lock can be taken first.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the association's inbound and outbound TSN
 * tracking logs via SCTP_PRINTF.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
/*
 * NOTE(review): "NOSIY_PRINTS" looks like a typo for "NOISY_PRINTS";
 * confirm which spelling the build defines before relying on this guard.
 */
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* log wrapped; also print entries above the current index */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		/* log wrapped; also print entries above the current index */
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
#endif
4557 
/*
 * Abort an association we own: send an ABORT chunk to the peer, notify
 * the ULP (unless the socket is gone) and free the TCB.  With no TCB,
 * only endpoint cleanup for a gone socket is performed.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				/* socket gone and no assocs left: free it */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		/*
		 * Lock-order dance: hold a reference while dropping the
		 * TCB lock so the socket lock can be taken first.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4618 
4619 void
4620 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4621     struct sockaddr *src, struct sockaddr *dst,
4622     struct sctphdr *sh, struct sctp_inpcb *inp,
4623     struct mbuf *cause,
4624     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4625     uint32_t vrf_id, uint16_t port)
4626 {
4627 	struct sctp_chunkhdr *ch, chunk_buf;
4628 	unsigned int chk_length;
4629 	int contains_init_chunk;
4630 
4631 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4632 	/* Generate a TO address for future reference */
4633 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4634 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4635 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4636 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4637 		}
4638 	}
4639 	contains_init_chunk = 0;
4640 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4641 	    sizeof(*ch), (uint8_t *)&chunk_buf);
4642 	while (ch != NULL) {
4643 		chk_length = ntohs(ch->chunk_length);
4644 		if (chk_length < sizeof(*ch)) {
4645 			/* break to abort land */
4646 			break;
4647 		}
4648 		switch (ch->chunk_type) {
4649 		case SCTP_INIT:
4650 			contains_init_chunk = 1;
4651 			break;
4652 		case SCTP_PACKET_DROPPED:
4653 			/* we don't respond to pkt-dropped */
4654 			return;
4655 		case SCTP_ABORT_ASSOCIATION:
4656 			/* we don't respond with an ABORT to an ABORT */
4657 			return;
4658 		case SCTP_SHUTDOWN_COMPLETE:
4659 			/*
4660 			 * we ignore it since we are not waiting for it and
4661 			 * peer is gone
4662 			 */
4663 			return;
4664 		case SCTP_SHUTDOWN_ACK:
4665 			sctp_send_shutdown_complete2(src, dst, sh,
4666 			    mflowtype, mflowid, fibnum,
4667 			    vrf_id, port);
4668 			return;
4669 		default:
4670 			break;
4671 		}
4672 		offset += SCTP_SIZE32(chk_length);
4673 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4674 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4675 	}
4676 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4677 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4678 	    (contains_init_chunk == 0))) {
4679 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4680 		    mflowtype, mflowid, fibnum,
4681 		    vrf_id, port);
4682 	}
4683 }
4684 
4685 /*
4686  * check the inbound datagram to make sure there is not an abort inside it,
4687  * if there is return 1, else return 0.
4688  */
4689 int
4690 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
4691 {
4692 	struct sctp_chunkhdr *ch;
4693 	struct sctp_init_chunk *init_chk, chunk_buf;
4694 	int offset;
4695 	unsigned int chk_length;
4696 
4697 	offset = iphlen + sizeof(struct sctphdr);
4698 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4699 	    (uint8_t *)&chunk_buf);
4700 	while (ch != NULL) {
4701 		chk_length = ntohs(ch->chunk_length);
4702 		if (chk_length < sizeof(*ch)) {
4703 			/* packet is probably corrupt */
4704 			break;
4705 		}
4706 		/* we seem to be ok, is it an abort? */
4707 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4708 			/* yep, tell them */
4709 			return (1);
4710 		}
4711 		if (ch->chunk_type == SCTP_INITIATION) {
4712 			/* need to update the Vtag */
4713 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4714 			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
4715 			if (init_chk != NULL) {
4716 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4717 			}
4718 		}
4719 		/* Nope, move to the next chunk */
4720 		offset += SCTP_SIZE32(chk_length);
4721 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4722 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4723 	}
4724 	return (0);
4725 }
4726 
4727 /*
4728  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4729  * set (i.e. it's 0) so, create this function to compare link local scopes
4730  */
4731 #ifdef INET6
uint32_t
sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
{
	struct sockaddr_in6 lhs, rhs;

	/* work on copies so the callers' addresses stay untouched */
	lhs = *addr1;
	rhs = *addr2;

	if ((lhs.sin6_scope_id == 0) && sa6_recoverscope(&lhs)) {
		/* can't get scope, so can't match */
		return (0);
	}
	if ((rhs.sin6_scope_id == 0) && sa6_recoverscope(&rhs)) {
		/* can't get scope, so can't match */
		return (0);
	}
	/* both scopes known; equal means same scope */
	return ((lhs.sin6_scope_id == rhs.sin6_scope_id) ? 1 : 0);
}
4756 
4757 /*
4758  * returns a sockaddr_in6 with embedded scope recovered and removed
4759  */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/* check and strip embedded scope junk */
	if ((addr->sin6_family == AF_INET6) &&
	    IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
		if (addr->sin6_scope_id == 0) {
			*store = *addr;
			if (sa6_recoverscope(store) == 0) {
				/* use the recovered scope */
				addr = store;
			}
		} else {
			/* else, return the original "to" addr */
			in6_clearscope(&addr->sin6_addr);
		}
	}
	return (addr);
}
4780 #endif
4781 
4782 /*
4783  * are the two addresses the same?  currently a "scopeless" check returns: 1
4784  * if same, 0 if not
4785  */
int
sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
{
	/* a NULL address never matches anything */
	if ((sa1 == NULL) || (sa2 == NULL)) {
		return (0);
	}
	/* addresses of different families never match */
	if (sa1->sa_family != sa2->sa_family) {
		return (0);
	}
#ifdef INET6
	if (sa1->sa_family == AF_INET6) {
		/* IPv6 addresses */
		return (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)sa1,
		    (struct sockaddr_in6 *)sa2));
	}
#endif
#ifdef INET
	if (sa1->sa_family == AF_INET) {
		/* IPv4 addresses */
		struct sockaddr_in *sin_a, *sin_b;

		sin_a = (struct sockaddr_in *)sa1;
		sin_b = (struct sockaddr_in *)sa2;
		return (sin_a->sin_addr.s_addr == sin_b->sin_addr.s_addr);
	}
#endif
	/* we don't do these... */
	return (0);
}
4827 
void
sctp_print_address(struct sockaddr *sa)
{
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];
#endif

	switch (sa->sa_family) {
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *a6 = (struct sockaddr_in6 *)sa;

			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
			    ip6_sprintf(ip6buf, &a6->sin6_addr),
			    ntohs(a6->sin6_port),
			    a6->sin6_scope_id);
			break;
		}
#endif
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *a4 = (struct sockaddr_in *)sa;
			unsigned char *b = (unsigned char *)&a4->sin_addr;

			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
			    b[0], b[1], b[2], b[3], ntohs(a4->sin_port));
			break;
		}
#endif
	default:
		/* unknown address family */
		SCTP_PRINTF("?\n");
		break;
	}
}
4867 
/*
 * Move every queued read control block belonging to stcb from the old
 * endpoint's read queue to the new endpoint's, re-charging the mbuf
 * chains from the old socket's receive buffer to the new one.  The old
 * comment below says this is hit by the peeloff/accept paths.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;	/* staging list between the two locks */
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for our target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* debit each mbuf from the old socket's rcv buffer */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* credit each mbuf to the new socket's rcv buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4943 
/*
 * Wake up any reader waiting on the endpoint's socket.  On platforms
 * that require the socket lock (__APPLE__/SCTP_SO_LOCK_TESTING) the
 * lock is acquired here unless the caller already holds it (so_locked);
 * elsewhere so_locked is unused.
 */
void
sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(inp);
		if (!so_locked) {
			/*
			 * Take a refcount and drop the TCB lock before
			 * acquiring the socket lock, then re-take the TCB
			 * lock and drop the refcount.
			 */
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket went away while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
4982 
/*
 * Append a read control block to the endpoint's read queue and charge
 * its mbuf chain to the socket buffer sb so readers/select see the
 * data.  Zero-length mbufs are pruned from the chain along the way.
 * If the socket can no longer read, the control and its data are freed
 * instead (unless still referenced by a stream queue).
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* no reader: drop the data unless a stream queue owns it */
		if (!control->on_strm_q) {
			sctp_free_remote_addr(control->whoFrom);
			if (control->data) {
				sctp_m_freem(control->data);
				control->data = NULL;
			}
			sctp_free_a_readq(stcb, control);
		}
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (!(control->spec_flags & M_NOTIFICATION)) {
		/* only non-notification messages count as receives */
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		/* charge this mbuf to the socket buffer accounting */
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		if (!control->on_strm_q) {
			sctp_free_remote_addr(control->whoFrom);
			sctp_free_a_readq(stcb, control);
		}
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		/* message is complete; reader may consume through EOR */
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	control->on_read_q = 1;
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	if (inp && inp->sctp_socket) {
		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
	}
}
5084 
5085 /*************HOLD THIS COMMENT FOR PATCH FILE OF
5086  *************ALTERNATE ROUTING CODE
5087  */
5088 
5089 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
5090  *************ALTERNATE ROUTING CODE
5091  */
5092 
5093 struct mbuf *
5094 sctp_generate_cause(uint16_t code, char *info)
5095 {
5096 	struct mbuf *m;
5097 	struct sctp_gen_error_cause *cause;
5098 	size_t info_len;
5099 	uint16_t len;
5100 
5101 	if ((code == 0) || (info == NULL)) {
5102 		return (NULL);
5103 	}
5104 	info_len = strlen(info);
5105 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
5106 		return (NULL);
5107 	}
5108 	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
5109 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
5110 	if (m != NULL) {
5111 		SCTP_BUF_LEN(m) = len;
5112 		cause = mtod(m, struct sctp_gen_error_cause *);
5113 		cause->code = htons(code);
5114 		cause->length = htons(len);
5115 		memcpy(cause->info, info, info_len);
5116 	}
5117 	return (m);
5118 }
5119 
5120 struct mbuf *
5121 sctp_generate_no_user_data_cause(uint32_t tsn)
5122 {
5123 	struct mbuf *m;
5124 	struct sctp_error_no_user_data *no_user_data_cause;
5125 	uint16_t len;
5126 
5127 	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
5128 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
5129 	if (m != NULL) {
5130 		SCTP_BUF_LEN(m) = len;
5131 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
5132 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
5133 		no_user_data_cause->cause.length = htons(len);
5134 		no_user_data_cause->tsn = htonl(tsn);
5135 	}
5136 	return (m);
5137 }
5138 
5139 #ifdef SCTP_MBCNT_LOGGING
5140 void
5141 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
5142     struct sctp_tmit_chunk *tp1, int chk_cnt)
5143 {
5144 	if (tp1->data == NULL) {
5145 		return;
5146 	}
5147 	asoc->chunks_on_out_queue -= chk_cnt;
5148 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
5149 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
5150 		    asoc->total_output_queue_size,
5151 		    tp1->book_size,
5152 		    0,
5153 		    tp1->mbcnt);
5154 	}
5155 	if (asoc->total_output_queue_size >= tp1->book_size) {
5156 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
5157 	} else {
5158 		asoc->total_output_queue_size = 0;
5159 	}
5160 
5161 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
5162 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
5163 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
5164 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
5165 		} else {
5166 			stcb->sctp_socket->so_snd.sb_cc = 0;
5167 
5168 		}
5169 	}
5170 }
5171 
5172 #endif
5173 
/*
 * Abandon a (partially) sent PR-SCTP message starting at chunk tp1:
 * free its data across the sent queue, send queue and stream-out
 * queue, mark the affected chunks SCTP_FORWARD_TSN_SKIP, notify the
 * ULP, and update the abandoned-message statistics.  Returns the total
 * booked size released.  'sent' selects the sent vs. unsent statistics
 * and notification type.
 */
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    uint8_t sent, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_stream_out *strq;
	struct sctp_tmit_chunk *chk = NULL, *tp2;
	struct sctp_stream_queue_pending *sp;
	uint32_t mid;
	uint16_t sid;
	uint8_t foundeom = 0;
	int ret_sz = 0;
	int notdone;
	int do_wakeup_routine = 0;

	sid = tp1->rec.data.sid;
	mid = tp1->rec.data.mid;
	/* count the abandonment in the per-assoc and per-stream stats */
	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
		stcb->asoc.abandoned_sent[0]++;
		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[sid].abandoned_sent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	} else {
		stcb->asoc.abandoned_unsent[0]++;
		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	}
	/* walk the message's fragments on this queue until EOM or end */
	do {
		ret_sz += tp1->book_size;
		if (tp1->data != NULL) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				sctp_flight_size_decrease(tp1);
				sctp_total_flight_decrease(stcb, tp1);
			}
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			/* give the peer's window credit back */
			stcb->asoc.peers_rwnd += tp1->send_size;
			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			do_wakeup_routine = 1;
			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
				stcb->asoc.sent_queue_cnt_removeable--;
			}
		}
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed we are done */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * Its a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if (foundeom == 0) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.
		 */
		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
			if ((tp1->rec.data.sid != sid) ||
			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
				break;
			}
			/*
			 * save to chk in case we have some on stream out
			 * queue. If so and we have an un-transmitted one we
			 * don't have to fudge the TSN.
			 */
			chk = tp1;
			ret_sz += tp1->book_size;
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			/* No flight involved here book the size to 0 */
			tp1->book_size = 0;
			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				foundeom = 1;
			}
			do_wakeup_routine = 1;
			tp1->sent = SCTP_FORWARD_TSN_SKIP;
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/*
			 * on to the sent queue so we can wait for it to be
			 * passed by.
			 */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.send_queue_cnt--;
			stcb->asoc.sent_queue_cnt++;
		}
	}
	if (foundeom == 0) {
		/*
		 * Still no eom found. That means there is stuff left on the
		 * stream out queue.. yuck.
		 */
		SCTP_TCB_SEND_LOCK(stcb);
		strq = &stcb->asoc.strmout[sid];
		sp = TAILQ_FIRST(&strq->outqueue);
		if (sp != NULL) {
			sp->discard_rest = 1;
			/*
			 * We may need to put a chunk on the queue that
			 * holds the TSN that would have been sent with the
			 * LAST bit.
			 */
			if (chk == NULL) {
				/* Yep, we have to */
				sctp_alloc_a_chunk(stcb, chk);
				if (chk == NULL) {
					/*
					 * we are hosed. All we can do is
					 * nothing.. which will cause an
					 * abort if the peer is paying
					 * attention.
					 */
					goto oh_well;
				}
				memset(chk, 0, sizeof(*chk));
				chk->rec.data.rcv_flags = 0;
				chk->sent = SCTP_FORWARD_TSN_SKIP;
				chk->asoc = &stcb->asoc;
				/* pick the MID the real LAST fragment would
				 * have carried */
				if (stcb->asoc.idata_supported == 0) {
					if (sp->sinfo_flags & SCTP_UNORDERED) {
						chk->rec.data.mid = 0;
					} else {
						chk->rec.data.mid = strq->next_mid_ordered;
					}
				} else {
					if (sp->sinfo_flags & SCTP_UNORDERED) {
						chk->rec.data.mid = strq->next_mid_unordered;
					} else {
						chk->rec.data.mid = strq->next_mid_ordered;
					}
				}
				chk->rec.data.sid = sp->sid;
				chk->rec.data.ppid = sp->ppid;
				chk->rec.data.context = sp->context;
				chk->flags = sp->act_flags;
				chk->whoTo = NULL;
				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
				strq->chunks_on_queues++;
				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
				stcb->asoc.sent_queue_cnt++;
				stcb->asoc.pr_sctp_cnt++;
			}
			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
			if (sp->sinfo_flags & SCTP_UNORDERED) {
				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
			}
			/* consume the MID we just used */
			if (stcb->asoc.idata_supported == 0) {
				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
					strq->next_mid_ordered++;
				}
			} else {
				if (sp->sinfo_flags & SCTP_UNORDERED) {
					strq->next_mid_unordered++;
				} else {
					strq->next_mid_ordered++;
				}
			}
	oh_well:
			if (sp->data) {
				/*
				 * Pull any data to free up the SB and allow
				 * sender to "add more" while we will throw
				 * away :-)
				 */
				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
				ret_sz += sp->length;
				do_wakeup_routine = 1;
				sp->some_taken = 1;
				sctp_m_freem(sp->data);
				sp->data = NULL;
				sp->tail_mbuf = NULL;
				sp->length = 0;
			}
		}
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
	if (do_wakeup_routine) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* ref/unlock dance to take the socket lock safely */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (ret_sz);
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	return (ret_sz);
}
5413 
5414 /*
5415  * checks to see if the given address, sa, is one that is currently known by
5416  * the kernel note: can't distinguish the same address on multiple interfaces
5417  * and doesn't handle multiple addresses with different zone/scope id's note:
5418  * ifa_ifwithaddr() compares the entire sockaddr struct
5419  */
5420 struct sctp_ifa *
5421 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
5422     int holds_lock)
5423 {
5424 	struct sctp_laddr *laddr;
5425 
5426 	if (holds_lock == 0) {
5427 		SCTP_INP_RLOCK(inp);
5428 	}
5429 
5430 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
5431 		if (laddr->ifa == NULL)
5432 			continue;
5433 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
5434 			continue;
5435 #ifdef INET
5436 		if (addr->sa_family == AF_INET) {
5437 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5438 			    laddr->ifa->address.sin.sin_addr.s_addr) {
5439 				/* found him. */
5440 				if (holds_lock == 0) {
5441 					SCTP_INP_RUNLOCK(inp);
5442 				}
5443 				return (laddr->ifa);
5444 				break;
5445 			}
5446 		}
5447 #endif
5448 #ifdef INET6
5449 		if (addr->sa_family == AF_INET6) {
5450 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5451 			    &laddr->ifa->address.sin6)) {
5452 				/* found him. */
5453 				if (holds_lock == 0) {
5454 					SCTP_INP_RUNLOCK(inp);
5455 				}
5456 				return (laddr->ifa);
5457 				break;
5458 			}
5459 		}
5460 #endif
5461 	}
5462 	if (holds_lock == 0) {
5463 		SCTP_INP_RUNLOCK(inp);
5464 	}
5465 	return (NULL);
5466 }
5467 
/*
 * Compute the address-hash value used to pick a bucket in the VRF
 * address hash table.  Unknown address families hash to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin;
			uint32_t v4;

			sin = (struct sockaddr_in *)addr;
			v4 = sin->sin_addr.s_addr;
			/* fold the upper half into the lower half */
			return (v4 ^ (v4 >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;
			uint32_t v6;

			sin6 = (struct sockaddr_in6 *)addr;
			/* sum the four 32-bit words, then fold */
			v6 = sin6->sin6_addr.s6_addr32[0] +
			    sin6->sin6_addr.s6_addr32[1] +
			    sin6->sin6_addr.s6_addr32[2] +
			    sin6->sin6_addr.s6_addr32[3];
			return (v6 ^ (v6 >> 16));
		}
#endif
	default:
		return (0);
	}
}
5501 
5502 struct sctp_ifa *
5503 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5504 {
5505 	struct sctp_ifa *sctp_ifap;
5506 	struct sctp_vrf *vrf;
5507 	struct sctp_ifalist *hash_head;
5508 	uint32_t hash_of_addr;
5509 
5510 	if (holds_lock == 0)
5511 		SCTP_IPI_ADDR_RLOCK();
5512 
5513 	vrf = sctp_find_vrf(vrf_id);
5514 	if (vrf == NULL) {
5515 		if (holds_lock == 0)
5516 			SCTP_IPI_ADDR_RUNLOCK();
5517 		return (NULL);
5518 	}
5519 
5520 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5521 
5522 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5523 	if (hash_head == NULL) {
5524 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5525 		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5526 		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5527 		sctp_print_address(addr);
5528 		SCTP_PRINTF("No such bucket for address\n");
5529 		if (holds_lock == 0)
5530 			SCTP_IPI_ADDR_RUNLOCK();
5531 
5532 		return (NULL);
5533 	}
5534 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5535 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5536 			continue;
5537 #ifdef INET
5538 		if (addr->sa_family == AF_INET) {
5539 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5540 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5541 				/* found him. */
5542 				if (holds_lock == 0)
5543 					SCTP_IPI_ADDR_RUNLOCK();
5544 				return (sctp_ifap);
5545 				break;
5546 			}
5547 		}
5548 #endif
5549 #ifdef INET6
5550 		if (addr->sa_family == AF_INET6) {
5551 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5552 			    &sctp_ifap->address.sin6)) {
5553 				/* found him. */
5554 				if (holds_lock == 0)
5555 					SCTP_IPI_ADDR_RUNLOCK();
5556 				return (sctp_ifap);
5557 				break;
5558 			}
5559 		}
5560 #endif
5561 	}
5562 	if (holds_lock == 0)
5563 		SCTP_IPI_ADDR_RUNLOCK();
5564 	return (NULL);
5565 }
5566 
/*
 * Called after the user has consumed data from the socket.  If the
 * receive window has grown by at least rwnd_req since the last report,
 * send a window-update SACK (and kick output); otherwise just remember
 * how much is pending.  Carefully re-checks the about-to-be-freed
 * state because locks may be dropped along the way.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	struct epoch_tracker et;
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* hold a refcount so the assoc cannot vanish under us */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* window grew enough: send a window-update SACK */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		NET_EPOCH_ENTER(et);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		NET_EPOCH_EXIT(et);
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* re-take the read lock if we dropped it for the caller */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}

	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5652 
int
sctp_sorecvmsg(struct socket *so,
    struct uio *uio,
    struct mbuf **mp,
    struct sockaddr *from,
    int fromlen,
    int *msg_flags,
    struct sctp_sndrcvinfo *sinfo,
    int filling_sinfo)
{
	/*
	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
	 * On the way out we may send out any combination of:
	 * MSG_NOTIFICATION MSG_EOR
	 *
	 */
	/*
	 * Locking discipline (NOTE(review) - derived from this function
	 * only): three separate locks are juggled below and each has a
	 * local flag tracking whether we currently hold it:
	 *  - sockbuf_lock: the sblock() sleep lock on so->so_rcv, held
	 *    across the whole call so no other reader interleaves.
	 *  - hold_sblock: the SOCKBUF_LOCK mutex on so->so_rcv.
	 *  - hold_rlock: the inpcb read-queue lock (SCTP_INP_READ_LOCK).
	 * freecnt_applied tracks a refcount we take on the stcb so it
	 * cannot be freed out from under us while we copy data.
	 */
	struct sctp_inpcb *inp = NULL;
	ssize_t my_len = 0;
	ssize_t cp_len = 0;	/* bytes to copy from the current mbuf */
	int error = 0;
	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
	struct mbuf *m = NULL;
	struct sctp_tcb *stcb = NULL;
	int wakeup_read_socket = 0;
	int freecnt_applied = 0;	/* 1 once we hold a ref on stcb */
	int out_flags = 0, in_flags = 0;
	int block_allowed = 1;
	uint32_t freed_so_far = 0;	/* bytes freed from so_rcv, drives rwnd updates */
	ssize_t copied_so_far = 0;	/* bytes handed to the user */
	int in_eeor_mode = 0;
	int no_rcv_needed = 0;
	uint32_t rwnd_req = 0;	/* threshold of freed bytes before a window update */
	int hold_sblock = 0;
	int hold_rlock = 0;
	ssize_t slen = 0;
	uint32_t held_length = 0;
	int sockbuf_lock = 0;

	if (uio == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}

	if (msg_flags) {
		in_flags = *msg_flags;
		if (in_flags & MSG_PEEK)
			SCTP_STAT_INCR(sctps_read_peeks);
	} else {
		in_flags = 0;
	}
	slen = uio->uio_resid;

	/* Pull in and set up our int flags */
	if (in_flags & MSG_OOB) {
		/* Out of band's NOT supported */
		return (EOPNOTSUPP);
	}
	/* MSG_PEEK only makes sense for the uio copy path (mp == NULL). */
	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if ((in_flags & (MSG_DONTWAIT
	    | MSG_NBIO
	    )) ||
	    SCTP_SO_IS_NBIO(so)) {
		block_allowed = 0;
	}
	/* setup the endpoint */
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
		return (EFAULT);
	}
	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
	/* Must be at least a MTU's worth */
	if (rwnd_req < SCTP_MIN_RWND)
		rwnd_req = SCTP_MIN_RWND;
	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SORECV_ENTER,
		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SORECV_ENTERPL,
		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
	}


	/* Serialize against other readers for the duration of the call. */
	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
	if (error) {
		goto release_unlocked;
	}
	sockbuf_lock = 1;
restart:


restart_nosblocks:
	if (hold_sblock == 0) {
		SOCKBUF_LOCK(&so->so_rcv);
		hold_sblock = 1;
	}
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto out;
		} else {
			if (so->so_rcv.sb_cc == 0) {
				/* indicate EOF */
				error = 0;
				goto out;
			}
		}
	}
	if (so->so_rcv.sb_cc <= held_length) {
		/* Nothing (new) to read; decide whether to sleep or fail. */
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0) {
				so->so_error = 0;
			}
			goto out;
		}
		if ((so->so_rcv.sb_cc == 0) &&
		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
				/*
				 * For active open side clear flags for
				 * re-use passive open is blocked by
				 * connect.
				 */
				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
					/*
					 * You were aborted, passive side
					 * always hits here
					 */
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
					error = ECONNRESET;
				}
				so->so_state &= ~(SS_ISCONNECTING |
				    SS_ISDISCONNECTING |
				    SS_ISCONFIRMING |
				    SS_ISCONNECTED);
				if (error == 0) {
					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
						error = ENOTCONN;
					}
				}
				goto out;
			}
		}
		if (block_allowed) {
			/* sbwait drops and re-takes SOCKBUF_LOCK while sleeping. */
			error = sbwait(&so->so_rcv);
			if (error) {
				goto out;
			}
			held_length = 0;
			goto restart_nosblocks;
		} else {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
			error = EWOULDBLOCK;
			goto out;
		}
	}
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	/* we possibly have data we can read */
	/* sa_ignore FREED_MEMORY */
	control = TAILQ_FIRST(&inp->read_queue);
	if (control == NULL) {
		/*
		 * This could be happening since the appender did the
		 * increment but as not yet did the tailq insert onto the
		 * read_queue
		 */
		if (hold_rlock == 0) {
			SCTP_INP_READ_LOCK(inp);
		}
		control = TAILQ_FIRST(&inp->read_queue);
		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
#ifdef INVARIANTS
			panic("Huh, its non zero and nothing on control?");
#endif
			/* Non-INVARIANTS: re-sync the count to reality. */
			so->so_rcv.sb_cc = 0;
		}
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
		goto restart;
	}

	if ((control->length == 0) &&
	    (control->do_not_ref_stcb)) {
		/*
		 * Clean up code for freeing assoc that left behind a
		 * pdapi.. maybe a peer in EEOR that just closed after
		 * sending and never indicated a EOR.
		 */
		if (hold_rlock == 0) {
			hold_rlock = 1;
			SCTP_INP_READ_LOCK(inp);
		}
		control->held_length = 0;
		if (control->data) {
			/* Hmm there is data here .. fix */
			struct mbuf *m_tmp;
			int cnt = 0;

			/* Re-count the chain and force-terminate the message. */
			m_tmp = control->data;
			while (m_tmp) {
				cnt += SCTP_BUF_LEN(m_tmp);
				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
					control->tail_mbuf = m_tmp;
					control->end_added = 1;
				}
				m_tmp = SCTP_BUF_NEXT(m_tmp);
			}
			control->length = cnt;
		} else {
			/* remove it */
			TAILQ_REMOVE(&inp->read_queue, control, next);
			/* Add back any hiddend data */
			sctp_free_remote_addr(control->whoFrom);
			sctp_free_a_readq(stcb, control);
		}
		if (hold_rlock) {
			hold_rlock = 0;
			SCTP_INP_READ_UNLOCK(inp);
		}
		goto restart;
	}
	if ((control->length == 0) &&
	    (control->end_added == 1)) {
		/*
		 * Do we also need to check for (control->pdapi_aborted ==
		 * 1)?
		 */
		/* Empty but complete message: discard it and try again. */
		if (hold_rlock == 0) {
			hold_rlock = 1;
			SCTP_INP_READ_LOCK(inp);
		}
		TAILQ_REMOVE(&inp->read_queue, control, next);
		if (control->data) {
#ifdef INVARIANTS
			panic("control->data not null but control->length == 0");
#else
			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
			sctp_m_freem(control->data);
			control->data = NULL;
#endif
		}
		if (control->aux_data) {
			sctp_m_free(control->aux_data);
			control->aux_data = NULL;
		}
#ifdef INVARIANTS
		if (control->on_strm_q) {
			panic("About to free ctl:%p so:%p and its in %d",
			    control, so, control->on_strm_q);
		}
#endif
		sctp_free_remote_addr(control->whoFrom);
		sctp_free_a_readq(stcb, control);
		if (hold_rlock) {
			hold_rlock = 0;
			SCTP_INP_READ_UNLOCK(inp);
		}
		goto restart;
	}
	if (control->length == 0) {
		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
		    (filling_sinfo)) {
			/* find a more suitable one then this */
			ctl = TAILQ_NEXT(control, next);
			while (ctl) {
				if ((ctl->stcb != control->stcb) && (ctl->length) &&
				    (ctl->some_taken ||
				    (ctl->spec_flags & M_NOTIFICATION) ||
				    ((ctl->do_not_ref_stcb == 0) &&
				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
				    ) {
					/*-
					 * If we have a different TCB next, and there is data
					 * present. If we have already taken some (pdapi), OR we can
					 * ref the tcb and no delivery as started on this stream, we
					 * take it. Note we allow a notification on a different
					 * assoc to be delivered..
					 */
					control = ctl;
					goto found_one;
				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
					    (ctl->length) &&
					    ((ctl->some_taken) ||
					    ((ctl->do_not_ref_stcb == 0) &&
					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
					/*-
					 * If we have the same tcb, and there is data present, and we
					 * have the strm interleave feature present. Then if we have
					 * taken some (pdapi) or we can refer to tht tcb AND we have
					 * not started a delivery for this stream, we can take it.
					 * Note we do NOT allow a notificaiton on the same assoc to
					 * be delivered.
					 */
					control = ctl;
					goto found_one;
				}
				ctl = TAILQ_NEXT(ctl, next);
			}
		}
		/*
		 * if we reach here, not suitable replacement is available
		 * <or> fragment interleave is NOT on. So stuff the sb_cc
		 * into the our held count, and its time to sleep again.
		 */
		held_length = so->so_rcv.sb_cc;
		control->held_length = so->so_rcv.sb_cc;
		goto restart;
	}
	/* Clear the held length since there is something to read */
	control->held_length = 0;
found_one:
	/*
	 * If we reach here, control has a some data for us to read off.
	 * Note that stcb COULD be NULL.
	 */
	if (hold_rlock == 0) {
		hold_rlock = 1;
		SCTP_INP_READ_LOCK(inp);
	}
	control->some_taken++;
	stcb = control->stcb;
	if (stcb) {
		if ((control->do_not_ref_stcb == 0) &&
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
			/* Assoc is dying; don't take a ref, pretend no stcb. */
			if (freecnt_applied == 0)
				stcb = NULL;
		} else if (control->do_not_ref_stcb == 0) {
			/* you can't free it on me please */
			/*
			 * The lock on the socket buffer protects us so the
			 * free code will stop. But since we used the
			 * socketbuf lock and the sender uses the tcb_lock
			 * to increment, we need to use the atomic add to
			 * the refcnt
			 */
			if (freecnt_applied) {
#ifdef INVARIANTS
				panic("refcnt already incremented");
#else
				SCTP_PRINTF("refcnt already incremented?\n");
#endif
			} else {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				freecnt_applied = 1;
			}
			/*
			 * Setup to remember how much we have not yet told
			 * the peer our rwnd has opened up. Note we grab the
			 * value from the tcb from last time. Note too that
			 * sack sending clears this when a sack is sent,
			 * which is fine. Once we hit the rwnd_req, we then
			 * will go to the sctp_user_rcvd() that will not
			 * lock until it KNOWs it MUST send a WUP-SACK.
			 */
			freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast;
			stcb->freed_by_sorcv_sincelast = 0;
		}
	}
	if (stcb &&
	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
	    control->do_not_ref_stcb == 0) {
		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
	}

	/* First lets get off the sinfo and sockaddr info */
	if ((sinfo != NULL) && (filling_sinfo != 0)) {
		sinfo->sinfo_stream = control->sinfo_stream;
		sinfo->sinfo_ssn = (uint16_t)control->mid;
		sinfo->sinfo_flags = control->sinfo_flags;
		sinfo->sinfo_ppid = control->sinfo_ppid;
		sinfo->sinfo_context = control->sinfo_context;
		sinfo->sinfo_timetolive = control->sinfo_timetolive;
		sinfo->sinfo_tsn = control->sinfo_tsn;
		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
		nxt = TAILQ_NEXT(control, next);
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
			/* Fill in the extended "next message" preview fields. */
			struct sctp_extrcvinfo *s_extra;

			s_extra = (struct sctp_extrcvinfo *)sinfo;
			if ((nxt) &&
			    (nxt->length)) {
				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
				if (nxt->sinfo_flags & SCTP_UNORDERED) {
					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
				}
				if (nxt->spec_flags & M_NOTIFICATION) {
					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
				}
				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
				s_extra->serinfo_next_length = nxt->length;
				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
				s_extra->serinfo_next_stream = nxt->sinfo_stream;
				if (nxt->tail_mbuf != NULL) {
					if (nxt->end_added) {
						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
					}
				}
			} else {
				/*
				 * we explicitly 0 this, since the memcpy
				 * got some other things beyond the older
				 * sinfo_ that is on the control's structure
				 * :-D
				 */
				nxt = NULL;
				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
				s_extra->serinfo_next_aid = 0;
				s_extra->serinfo_next_length = 0;
				s_extra->serinfo_next_ppid = 0;
				s_extra->serinfo_next_stream = 0;
			}
		}
		/*
		 * update off the real current cum-ack, if we have an stcb.
		 */
		if ((control->do_not_ref_stcb == 0) && stcb)
			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
		/*
		 * mask off the high bits, we keep the actual chunk bits in
		 * there.
		 */
		sinfo->sinfo_flags &= 0x00ff;
		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
			sinfo->sinfo_flags |= SCTP_UNORDERED;
		}
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	{
		int index, newindex;
		struct sctp_pcbtsn_rlog *entry;

		/* Lock-free claim of the next read-log slot via CAS. */
		do {
			index = inp->readlog_index;
			newindex = index + 1;
			if (newindex >= SCTP_READ_LOG_SIZE) {
				newindex = 0;
			}
		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
		entry = &inp->readlog[index];
		entry->vtag = control->sinfo_assoc_id;
		entry->strm = control->sinfo_stream;
		entry->seq = (uint16_t)control->mid;
		entry->sz = control->length;
		entry->flgs = control->sinfo_flags;
	}
#endif
	if ((fromlen > 0) && (from != NULL)) {
		union sctp_sockstore store;
		size_t len;

		/* Copy out the peer address, mapping v4 to v6 if requested. */
		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
#ifdef INET6
		case AF_INET6:
			len = sizeof(struct sockaddr_in6);
			store.sin6 = control->whoFrom->ro._l_addr.sin6;
			store.sin6.sin6_port = control->port_from;
			break;
#endif
#ifdef INET
		case AF_INET:
#ifdef INET6
			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
				len = sizeof(struct sockaddr_in6);
				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
				    &store.sin6);
				store.sin6.sin6_port = control->port_from;
			} else {
				len = sizeof(struct sockaddr_in);
				store.sin = control->whoFrom->ro._l_addr.sin;
				store.sin.sin_port = control->port_from;
			}
#else
			len = sizeof(struct sockaddr_in);
			store.sin = control->whoFrom->ro._l_addr.sin;
			store.sin.sin_port = control->port_from;
#endif
			break;
#endif
		default:
			len = 0;
			break;
		}
		memcpy(from, &store, min((size_t)fromlen, len));
#ifdef INET6
		{
			struct sockaddr_in6 lsa6, *from6;

			from6 = (struct sockaddr_in6 *)from;
			sctp_recover_scope_mac(from6, (&lsa6));
		}
#endif
	}
	if (hold_rlock) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	/* now copy out what data we can */
	if (mp == NULL) {
		/* copy out each mbuf in the chain up to length */
get_more_data:
		m = control->data;
		while (m) {
			/* Move out all we can */
			cp_len = uio->uio_resid;
			my_len = SCTP_BUF_LEN(m);
			if (cp_len > my_len) {
				/* not enough in this buf */
				cp_len = my_len;
			}
			/* uiomove may sleep/fault; drop the read lock first. */
			if (hold_rlock) {
				SCTP_INP_READ_UNLOCK(inp);
				hold_rlock = 0;
			}
			if (cp_len > 0)
				error = uiomove(mtod(m, char *), (int)cp_len, uio);
			/* re-read */
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				goto release;
			}

			if ((control->do_not_ref_stcb == 0) && stcb &&
			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
				no_rcv_needed = 1;
			}
			if (error) {
				/* error we are out of here */
				goto release;
			}
			SCTP_INP_READ_LOCK(inp);
			hold_rlock = 1;
			if (cp_len == SCTP_BUF_LEN(m)) {
				/* We consumed this whole mbuf. */
				if ((SCTP_BUF_NEXT(m) == NULL) &&
				    (control->end_added)) {
					out_flags |= MSG_EOR;
					if ((control->do_not_ref_stcb == 0) &&
					    (control->stcb != NULL) &&
					    ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				/* we ate up the mbuf */
				if (in_flags & MSG_PEEK) {
					/* just looking */
					m = SCTP_BUF_NEXT(m);
					copied_so_far += cp_len;
				} else {
					/* dispose of the mbuf */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
					}
					sctp_sbfree(control, stcb, &so->so_rcv, m);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
					}
					copied_so_far += cp_len;
					freed_so_far += (uint32_t)cp_len;
					freed_so_far += MSIZE;
					atomic_subtract_int(&control->length, cp_len);
					control->data = sctp_m_free(m);
					m = control->data;
					/*
					 * been through it all, must hold sb
					 * lock ok to null tail
					 */
					if (control->data == NULL) {
#ifdef INVARIANTS
						if ((control->end_added == 0) ||
						    (TAILQ_NEXT(control, next) == NULL)) {
							/*
							 * If the end is not
							 * added, OR the
							 * next is NOT null
							 * we MUST have the
							 * lock.
							 */
							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
								panic("Hmm we don't own the lock?");
							}
						}
#endif
						control->tail_mbuf = NULL;
#ifdef INVARIANTS
						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
							panic("end_added, nothing left and no MSG_EOR");
						}
#endif
					}
				}
			} else {
				/* Do we need to trim the mbuf? */
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				if ((in_flags & MSG_PEEK) == 0) {
					/* Partial consume: advance the data pointer in place. */
					SCTP_BUF_RESV_UF(m, cp_len);
					SCTP_BUF_LEN(m) -= (int)cp_len;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len);
					}
					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
					if ((control->do_not_ref_stcb == 0) &&
					    stcb) {
						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
					}
					copied_so_far += cp_len;
					freed_so_far += (uint32_t)cp_len;
					freed_so_far += MSIZE;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
						    SCTP_LOG_SBRESULT, 0);
					}
					atomic_subtract_int(&control->length, cp_len);
				} else {
					copied_so_far += cp_len;
				}
			}
			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
				break;
			}
			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
			    (control->do_not_ref_stcb == 0) &&
			    (freed_so_far >= rwnd_req)) {
				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
			}
		}		/* end while(m) */
		/*
		 * At this point we have looked at it all and we either have
		 * a MSG_EOR/or read all the user wants... <OR>
		 * control->length == 0.
		 */
		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
			/* we are done with this control */
			if (control->length == 0) {
				if (control->data) {
#ifdef INVARIANTS
					panic("control->data not null at read eor?");
#else
					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
					sctp_m_freem(control->data);
					control->data = NULL;
#endif
				}
		done_with_control:
				/* Unlink and free the fully-consumed control block. */
				if (hold_rlock == 0) {
					SCTP_INP_READ_LOCK(inp);
					hold_rlock = 1;
				}
				TAILQ_REMOVE(&inp->read_queue, control, next);
				/* Add back any hiddend data */
				if (control->held_length) {
					held_length = 0;
					control->held_length = 0;
					wakeup_read_socket = 1;
				}
				if (control->aux_data) {
					sctp_m_free(control->aux_data);
					control->aux_data = NULL;
				}
				no_rcv_needed = control->do_not_ref_stcb;
				sctp_free_remote_addr(control->whoFrom);
				control->data = NULL;
#ifdef INVARIANTS
				if (control->on_strm_q) {
					panic("About to free ctl:%p so:%p and its in %d",
					    control, so, control->on_strm_q);
				}
#endif
				sctp_free_a_readq(stcb, control);
				control = NULL;
				if ((freed_so_far >= rwnd_req) &&
				    (no_rcv_needed == 0))
					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);

			} else {
				/*
				 * The user did not read all of this
				 * message, turn off the returned MSG_EOR
				 * since we are leaving more behind on the
				 * control to read.
				 */
#ifdef INVARIANTS
				if (control->end_added &&
				    (control->data == NULL) &&
				    (control->tail_mbuf == NULL)) {
					panic("Gak, control->length is corrupt?");
				}
#endif
				no_rcv_needed = control->do_not_ref_stcb;
				out_flags &= ~MSG_EOR;
			}
		}
		if (out_flags & MSG_EOR) {
			goto release;
		}
		if ((uio->uio_resid == 0) ||
		    ((in_eeor_mode) &&
		    (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) {
			goto release;
		}
		/*
		 * If I hit here the receiver wants more and this message is
		 * NOT done (pd-api). So two questions. Can we block? if not
		 * we are done. Did the user NOT set MSG_WAITALL?
		 */
		if (block_allowed == 0) {
			goto release;
		}
		/*
		 * We need to wait for more data a few things: - We don't
		 * sbunlock() so we don't get someone else reading. - We
		 * must be sure to account for the case where what is added
		 * is NOT to our control when we wakeup.
		 */

		/*
		 * Do we need to tell the transport a rwnd update might be
		 * needed before we go to sleep?
		 */
		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
		    ((freed_so_far >= rwnd_req) &&
		    (control->do_not_ref_stcb == 0) &&
		    (no_rcv_needed == 0))) {
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
		}
wait_some_more:
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			goto release;
		}

		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
			goto release;

		if (hold_rlock == 1) {
			SCTP_INP_READ_UNLOCK(inp);
			hold_rlock = 0;
		}
		if (hold_sblock == 0) {
			SOCKBUF_LOCK(&so->so_rcv);
			hold_sblock = 1;
		}
		if ((copied_so_far) && (control->length == 0) &&
		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
			goto release;
		}
		if (so->so_rcv.sb_cc <= control->held_length) {
			error = sbwait(&so->so_rcv);
			if (error) {
				goto release;
			}
			control->held_length = 0;
		}
		if (hold_sblock) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			hold_sblock = 0;
		}
		if (control->length == 0) {
			/* still nothing here */
			if (control->end_added == 1) {
				/* he aborted, or is done i.e.did a shutdown */
				out_flags |= MSG_EOR;
				if (control->pdapi_aborted) {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;

					out_flags |= MSG_TRUNC;
				} else {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				goto done_with_control;
			}
			if (so->so_rcv.sb_cc > held_length) {
				control->held_length = so->so_rcv.sb_cc;
				held_length = 0;
			}
			goto wait_some_more;
		} else if (control->data == NULL) {
			/*
			 * we must re-sync since data is probably being
			 * added
			 */
			SCTP_INP_READ_LOCK(inp);
			if ((control->length > 0) && (control->data == NULL)) {
				/*
				 * big trouble.. we have the lock and its
				 * corrupt?
				 */
#ifdef INVARIANTS
				panic("Impossible data==NULL length !=0");
#endif
				out_flags |= MSG_EOR;
				out_flags |= MSG_TRUNC;
				control->length = 0;
				SCTP_INP_READ_UNLOCK(inp);
				goto done_with_control;
			}
			SCTP_INP_READ_UNLOCK(inp);
			/* We will fall around to get more data */
		}
		goto get_more_data;
	} else {
		/*-
		 * Give caller back the mbuf chain,
		 * store in uio_resid the length
		 */
		wakeup_read_socket = 0;
		if ((control->end_added == 0) ||
		    (TAILQ_NEXT(control, next) == NULL)) {
			/* Need to get rlock */
			if (hold_rlock == 0) {
				SCTP_INP_READ_LOCK(inp);
				hold_rlock = 1;
			}
		}
		if (control->end_added) {
			out_flags |= MSG_EOR;
			if ((control->do_not_ref_stcb == 0) &&
			    (control->stcb != NULL) &&
			    ((control->spec_flags & M_NOTIFICATION) == 0))
				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
		}
		if (control->spec_flags & M_NOTIFICATION) {
			out_flags |= MSG_NOTIFICATION;
		}
		uio->uio_resid = control->length;
		*mp = control->data;
		m = control->data;
		/* Account every mbuf we hand over out of the socket buffer. */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
			}
			sctp_sbfree(control, stcb, &so->so_rcv, m);
			freed_so_far += (uint32_t)SCTP_BUF_LEN(m);
			freed_so_far += MSIZE;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control->data = control->tail_mbuf = NULL;
		control->length = 0;
		if (out_flags & MSG_EOR) {
			/* Done with this control */
			goto done_with_control;
		}
	}
release:
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}

	sbunlock(&so->so_rcv);
	sockbuf_lock = 0;

release_unlocked:
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	/* Final chance to push a window update before returning. */
	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
		if ((freed_so_far >= rwnd_req) &&
		    (control && (control->do_not_ref_stcb == 0)) &&
		    (no_rcv_needed == 0))
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
	}
out:
	if (msg_flags) {
		*msg_flags = out_flags;
	}
	if (((out_flags & MSG_EOR) == 0) &&
	    ((in_flags & MSG_PEEK) == 0) &&
	    (sinfo) &&
	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
		struct sctp_extrcvinfo *s_extra;

		s_extra = (struct sctp_extrcvinfo *)sinfo;
		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
	}
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
	}
	if (sockbuf_lock) {
		sbunlock(&so->so_rcv);
	}

	if (freecnt_applied) {
		/*
		 * The lock on the socket buffer protects us so the free
		 * code will stop. But since we used the socketbuf lock and
		 * the sender uses the tcb_lock to increment, we need to use
		 * the atomic add to the refcnt.
		 */
		if (stcb == NULL) {
#ifdef INVARIANTS
			panic("stcb for refcnt has gone NULL?");
			goto stage_left;
#else
			goto stage_left;
#endif
		}
		/* Save the value back for next time */
		stcb->freed_by_sorcv_sincelast = freed_so_far;
		atomic_add_int(&stcb->asoc.refcnt, -1);
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		if (stcb) {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
			    stcb->asoc.my_rwnd,
			    so->so_rcv.sb_cc);
		} else {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
			    0,
			    so->so_rcv.sb_cc);
		}
	}
stage_left:
	if (wakeup_read_socket) {
		sctp_sorwakeup(inp, so);
	}
	return (error);
}
6619 
6620 
6621 #ifdef SCTP_MBUF_LOGGING
6622 struct mbuf *
6623 sctp_m_free(struct mbuf *m)
6624 {
6625 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6626 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6627 	}
6628 	return (m_free(m));
6629 }
6630 
6631 void
6632 sctp_m_freem(struct mbuf *mb)
6633 {
6634 	while (mb != NULL)
6635 		mb = sctp_m_free(mb);
6636 }
6637 
6638 #endif
6639 
/*
 * Queue a peer-set-primary request for the given local address.
 * Returns 0 on success, or EADDRNOTAVAIL if the address is not a local
 * address in the given VRF, or ENOMEM if no work item could be allocated.
 */
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	/* Look the address up in the given VRF; fail if it is not local. */
	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and int wi structure */
	SCTP_INCR_LADDR_COUNT();
	memset(wi, 0, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* Hold a reference on the ifa for the lifetime of the work item. */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	/* Kick the address work-queue timer so the request gets serviced. */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	SCTP_WQ_ADDR_UNLOCK();
	return (0);
}
6686 
6687 
/*
 * Protocol-specific soreceive() entry point for SCTP sockets.
 * Delegates the actual receive to sctp_sorecvmsg() and then converts the
 * returned sinfo into a control-message chain (*controlp) and the peer
 * address into a freshly allocated sockaddr (*psa) for the caller.
 * Returns 0 or an errno from the receive path.
 */
int
sctp_soreceive(struct socket *so,
    struct sockaddr **psa,
    struct uio *uio,
    struct mbuf **mp0,
    struct mbuf **controlp,
    int *flagsp)
{
	int error, fromlen;
	uint8_t sockbuf[256];	/* scratch buffer for the source address */
	struct sockaddr *from;
	struct sctp_extrcvinfo sinfo;
	int filling_sinfo = 1;
	int flags;
	struct sctp_inpcb *inp;

	inp = (struct sctp_inpcb *)so->so_pcb;
	/* pickup the assoc we are reading from */
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	/*
	 * Only gather sinfo if the user asked for any of the receive-info
	 * events AND supplied somewhere to put the control messages.
	 */
	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
	    (controlp == NULL)) {
		/* user does not want the sndrcv ctl */
		filling_sinfo = 0;
	}
	if (psa) {
		from = (struct sockaddr *)sockbuf;
		fromlen = sizeof(sockbuf);
		/* zero sa_len so "no address filled in" is detectable below */
		from->sa_len = 0;
	} else {
		from = NULL;
		fromlen = 0;
	}

	if (filling_sinfo) {
		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
	}
	if (flagsp != NULL) {
		flags = *flagsp;
	} else {
		flags = 0;
	}
	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags,
	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
	if (flagsp != NULL) {
		*flagsp = flags;
	}
	if (controlp != NULL) {
		/* copy back the sinfo in a CMSG format */
		if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) {
			*controlp = sctp_build_ctl_nchunk(inp,
			    (struct sctp_sndrcvinfo *)&sinfo);
		} else {
			/* notifications carry no sinfo control message */
			*controlp = NULL;
		}
	}
	if (psa) {
		/* copy back the address info */
		if (from && from->sa_len) {
			*psa = sodupsockaddr(from, M_NOWAIT);
		} else {
			*psa = NULL;
		}
	}
	return (error);
}
6758 
6759 
6760 
6761 
6762 
/*
 * Add the totaddr addresses in the packed array 'addr' to the association.
 * Returns the number of addresses successfully added; on failure *error is
 * set (EINVAL for illegal addresses, ENOBUFS if adding failed) and the
 * association has already been freed — the caller must not touch stcb.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;
#ifdef INET
	struct sockaddr_in *sin;
#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;
#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* reject wildcard, broadcast and multicast peers */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* reject unspecified and multicast peers */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * NOTE(review): an unknown sa_family leaves incr at
			 * its previous value (0 on the first iteration), so
			 * sa is not advanced here.  Presumably callers have
			 * validated the list via sctp_connectx_helper_find()
			 * first, which rejects unknown families — confirm.
			 */
			break;
		}
		/* step to the next packed sockaddr in the array */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6843 
/*
 * Validate a packed array of totaddr sockaddrs that occupies at most
 * 'limit' bytes, counting IPv4/IPv6 entries into *num_v4/*num_v6.
 * Returns 0 if all addresses are acceptable, EINVAL on a malformed list
 * (bad length, unknown family, v4-mapped address, or overrunning 'limit'),
 * or EALREADY if any address already belongs to an existing association.
 */
int
sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
    unsigned int totaddr,
    unsigned int *num_v4, unsigned int *num_v6,
    unsigned int limit)
{
	struct sockaddr *sa;
	struct sctp_tcb *stcb;
	unsigned int incr, at, i;

	at = 0;
	sa = addr;
	*num_v6 = *num_v4 = 0;
	/* account and validate addresses */
	if (totaddr == 0) {
		return (EINVAL);
	}
	for (i = 0; i < totaddr; i++) {
		/* ensure at least a generic sockaddr header fits before
		 * we read sa_family/sa_len */
		if (at + sizeof(struct sockaddr) > limit) {
			return (EINVAL);
		}
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = (unsigned int)sizeof(struct sockaddr_in);
			if (sa->sa_len != incr) {
				return (EINVAL);
			}
			(*num_v4) += 1;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			{
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)sa;
				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
					/* Must be non-mapped for connectx */
					return (EINVAL);
				}
				incr = (unsigned int)sizeof(struct sockaddr_in6);
				if (sa->sa_len != incr) {
					return (EINVAL);
				}
				(*num_v6) += 1;
				break;
			}
#endif
		default:
			return (EINVAL);
		}
		/* the full entry must also fit within the buffer */
		if ((at + incr) > limit) {
			return (EINVAL);
		}
		/*
		 * Take a temporary ref on the inp so it can't go away
		 * during the lookup; drop it again if no assoc was found
		 * (on success the lookup path consumed it).
		 */
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
		if (stcb != NULL) {
			SCTP_TCB_UNLOCK(stcb);
			return (EALREADY);
		} else {
			SCTP_INP_DECR_REF(inp);
		}
		at += incr;
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
	return (0);
}
6912 
6913 /*
6914  * sctp_bindx(ADD) for one address.
6915  * assumes all arguments are valid/checked by caller.
6916  */
/*
 * sctp_bindx(ADD) for one address: validate the address against the
 * endpoint's address family/flags, then either perform the initial bind
 * (if the endpoint is still unbound) or add the address to the endpoint.
 * The result is reported through *error (0 on success).
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;	/* holds a converted v4-mapped address */
#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;

#endif
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* strip the v4-mapped form down to a plain sockaddr_in */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* endpoint is not bound yet: this becomes the initial bind */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* check whether some other endpoint already owns addr:port */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* port cleared before handing to address management */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
7042 
7043 /*
7044  * sctp_bindx(DELETE) for one address.
7045  * assumes all arguments are valid/checked by caller.
7046  */
/*
 * sctp_bindx(DELETE) for one address: validate the address against the
 * endpoint's address family/flags and then remove it from the endpoint.
 * The result is reported through *error (0 on success).
 */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;	/* holds a converted v4-mapped address */
#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;
#endif

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* strip the v4-mapped form down to a plain sockaddr_in */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
7129 
7130 /*
7131  * returns the valid local address count for an assoc, taking into account
7132  * all scoping rules
7133  */
/*
 * Count the local addresses valid for the association, honoring the
 * association's scope flags (loopback, private-v4, link/site-local v6)
 * and the jail (prison) restrictions of the owning endpoint.
 * Returns the count (0 if the VRF cannot be found).
 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope;
#if defined(INET)
	int ipv4_local_scope, ipv4_addr_legal;
#endif
#if defined (INET6)
	int local_scope, site_scope, ipv6_addr_legal;
#endif
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
	/* hold the address-list read lock while walking the VRF/ifn lists */
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}

	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = &sctp_ifa->address.sin;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						/* skip addrs not visible in this jail */
						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin->sin_addr) != 0) {
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = &sctp_ifa->address.sin6;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						/* skip addrs not visible in this jail */
						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin6->sin6_addr) != 0) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case: count the explicitly bound addresses
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
7272 
7273 #if defined(SCTP_LOCAL_TRACE_BUF)
7274 
/*
 * Record one entry in the circular local trace buffer.  The slot index is
 * claimed lock-free via a CAS loop, so concurrent writers each get a
 * distinct slot; the entry itself is written without synchronization.
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/* atomically advance the shared index, wrapping back to 1 */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* a wrapped index means we own slot 0 */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
7300 
7301 #endif
/*
 * UDP tunneling receive callback: strip the UDP header from a tunneled
 * SCTP packet and hand the result to the normal SCTP input path, keeping
 * the UDP source port so replies can be tunneled back.  Consumes 'm' in
 * all cases (either by passing it to input or by freeing it).
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
    const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
{
	struct ip *iph;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;	/* UDP source port, kept in network byte order */

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	/*
	 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP
	 * checksum and it was valid. Since CSUM_DATA_VALID ==
	 * CSUM_SCTP_VALID this would imply that the HW also verified the
	 * SCTP checksum. Therefore, clear the bit.
	 */
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
	    m->m_pkthdr.len,
	    if_name(m->m_pkthdr.rcvif),
	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/* shrink the IP payload length by the removed UDP header */
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = mtod(m, struct ip6_hdr *);
		/* shrink the IPv6 payload length by the removed UDP header */
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}
7384 
7385 #ifdef INET
/*
 * ICMP error handler for tunneled SCTP/UDP (IPv4): locate the association
 * the errored packet belonged to, verify the UDP ports and the
 * verification tag (or the INIT initiate-tag) to guard against spoofed
 * ICMP, and forward the error to sctp_notify().
 */
static void
sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
{
	struct ip *outer_ip, *inner_ip;
	struct sctphdr *sh;
	struct icmp *icmp;
	struct udphdr *udp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_init_chunk *ch;
	struct sockaddr_in src, dst;
	uint8_t type, code;

	/*
	 * vip points at the quoted inner IP header; recover the enclosing
	 * ICMP header and outer IP header by stepping backwards.
	 */
	inner_ip = (struct ip *)vip;
	icmp = (struct icmp *)((caddr_t)inner_ip -
	    (sizeof(struct icmp) - sizeof(struct ip)));
	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
	/* need the full UDP header plus 8 bytes of SCTP header quoted */
	if (ntohs(outer_ip->ip_len) <
	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
		return;
	}
	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
	sh = (struct sctphdr *)(udp + 1);
	memset(&src, 0, sizeof(struct sockaddr_in));
	src.sin_family = AF_INET;
	src.sin_len = sizeof(struct sockaddr_in);
	src.sin_port = sh->src_port;
	src.sin_addr = inner_ip->ip_src;
	memset(&dst, 0, sizeof(struct sockaddr_in));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(struct sockaddr_in);
	dst.sin_port = sh->dest_port;
	dst.sin_addr = inner_ip->ip_dst;
	/*
	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
	 * holds our local endpoint address. Thus we reverse the dst and the
	 * src in the lookup.
	 */
	inp = NULL;
	net = NULL;
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	    (struct sockaddr *)&src,
	    &inp, &net, 1,
	    SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/* Check the UDP port numbers */
		if ((udp->uh_dport != net->port) ||
		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh->v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider packets
			 * reflecting the verification tag.
			 */
			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			if (ntohs(outer_ip->ip_len) >=
			    sizeof(struct ip) +
			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
				/*
				 * In this case we can check if we got an
				 * INIT chunk and if the initiate tag
				 * matches.
				 */
				ch = (struct sctp_init_chunk *)(sh + 1);
				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = icmp->icmp_type;
		code = icmp->icmp_code;
		/* a port-unreachable means the peer has no tunneling socket */
		if ((type == ICMP_UNREACH) &&
		    (code == ICMP_UNREACH_PORT)) {
			code = ICMP_UNREACH_PROTOCOL;
		}
		sctp_notify(inp, stcb, net, type, code,
		    ntohs(inner_ip->ip_len),
		    (uint32_t)ntohs(icmp->icmp_nextmtu));
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
	return;
}
7493 #endif
7494 
7495 #ifdef INET6
/*
 * ICMPv6 error handler for tunneled SCTP/UDP (IPv6): copy the quoted UDP
 * and SCTP headers out of the ICMPv6 payload, locate the association,
 * verify ports and verification tag (or INIT initiate-tag) to guard
 * against spoofed errors, and forward the error to sctp6_notify().
 */
static void
sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
{
	struct ip6ctlparam *ip6cp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctphdr sh;
	struct udphdr udp;
	struct sockaddr_in6 src, dst;
	uint8_t type, code;

	ip6cp = (struct ip6ctlparam *)d;
	/*
	 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid.
	 */
	if (ip6cp->ip6c_m == NULL) {
		return;
	}
	/*
	 * Check if we can safely examine the ports and the verification tag
	 * of the SCTP common header.
	 */
	if (ip6cp->ip6c_m->m_pkthdr.len <
	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
		return;
	}
	/* Copy out the UDP header. */
	memset(&udp, 0, sizeof(struct udphdr));
	m_copydata(ip6cp->ip6c_m,
	    ip6cp->ip6c_off,
	    sizeof(struct udphdr),
	    (caddr_t)&udp);
	/* Copy out the port numbers and the verification tag. */
	memset(&sh, 0, sizeof(struct sctphdr));
	m_copydata(ip6cp->ip6c_m,
	    ip6cp->ip6c_off + sizeof(struct udphdr),
	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
	    (caddr_t)&sh);
	memset(&src, 0, sizeof(struct sockaddr_in6));
	src.sin6_family = AF_INET6;
	src.sin6_len = sizeof(struct sockaddr_in6);
	src.sin6_port = sh.src_port;
	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
	/* embed the receiving interface's scope for scoped addresses */
	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
	memset(&dst, 0, sizeof(struct sockaddr_in6));
	dst.sin6_family = AF_INET6;
	dst.sin6_len = sizeof(struct sockaddr_in6);
	dst.sin6_port = sh.dest_port;
	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
	/*
	 * 'dst' is the peer of the packet that failed; 'src' is our local
	 * address, so the lookup swaps them.
	 */
	inp = NULL;
	net = NULL;
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	    (struct sockaddr *)&src,
	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/* Check the UDP port numbers */
		if ((udp.uh_dport != net->port) ||
		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh.v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider packets
			 * reflecting the verification tag.
			 */
			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			if (ip6cp->ip6c_m->m_pkthdr.len >=
			    ip6cp->ip6c_off + sizeof(struct udphdr) +
			    sizeof(struct sctphdr) +
			    sizeof(struct sctp_chunkhdr) +
			    offsetof(struct sctp_init, a_rwnd)) {
				/*
				 * In this case we can check if we got an
				 * INIT chunk and if the initiate tag
				 * matches.
				 */
				uint32_t initiate_tag;
				uint8_t chunk_type;

				m_copydata(ip6cp->ip6c_m,
				    ip6cp->ip6c_off +
				    sizeof(struct udphdr) +
				    sizeof(struct sctphdr),
				    sizeof(uint8_t),
				    (caddr_t)&chunk_type);
				m_copydata(ip6cp->ip6c_m,
				    ip6cp->ip6c_off +
				    sizeof(struct udphdr) +
				    sizeof(struct sctphdr) +
				    sizeof(struct sctp_chunkhdr),
				    sizeof(uint32_t),
				    (caddr_t)&initiate_tag);
				if ((chunk_type != SCTP_INITIATION) ||
				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = ip6cp->ip6c_icmp6->icmp6_type;
		code = ip6cp->ip6c_icmp6->icmp6_code;
		/* a no-port unreachable means the peer has no tunneling socket */
		if ((type == ICMP6_DST_UNREACH) &&
		    (code == ICMP6_DST_UNREACH_NOPORT)) {
			type = ICMP6_PARAM_PROB;
			code = ICMP6_PARAMPROB_NEXTHEADER;
		}
		sctp6_notify(inp, stcb, net, type, code,
		    ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce inp's ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
}
7634 #endif
7635 
void
sctp_over_udp_stop(void)
{
	/*
	 * Tear down the kernel UDP tunneling sockets, if present.
	 *
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
#ifdef INET
	struct socket *so4;

	so4 = SCTP_BASE_INFO(udp4_tun_socket);
	if (so4 != NULL) {
		soclose(so4);
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	struct socket *so6;

	so6 = SCTP_BASE_INFO(udp6_tun_socket);
	if (so6 != NULL) {
		soclose(so6);
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}
7656 
/*
 * Create and bind the kernel UDP tunneling sockets (IPv4 and/or IPv6)
 * on the configured sctp_udp_tunneling_port, installing the tunneling
 * and ICMP callbacks.  Returns 0 on success or an errno; on any failure
 * all sockets created so far are torn down via sctp_over_udp_stop().
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}
7743 
7744 /*
7745  * sctp_min_mtu ()returns the minimum of all non-zero arguments.
7746  * If all arguments are zero, zero is returned.
7747  */
uint32_t
sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
{
	/*
	 * Return the smallest non-zero value among the three MTUs.
	 * A zero argument means "unknown" and is ignored; if all three
	 * are zero, zero is returned.
	 */
	uint32_t candidates[3];
	uint32_t smallest;
	int i;

	candidates[0] = mtu1;
	candidates[1] = mtu2;
	candidates[2] = mtu3;
	smallest = 0;
	for (i = 0; i < 3; i++) {
		if (candidates[i] == 0) {
			continue;
		}
		if ((smallest == 0) || (candidates[i] < smallest)) {
			smallest = candidates[i];
		}
	}
	return (smallest);
}
7777 
/*
 * Record the path MTU for a remote address in the (shared) TCP host
 * cache, keyed by destination address and FIB number.  Address
 * families other than the compiled-in INET/INET6 are silently ignored.
 */
void
sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		/* Unsupported family: nothing to store. */
		return;
	}
	tcp_hc_updatemtu(&inc, (u_long)mtu);
}
7802 
/*
 * Look up the cached path MTU for a remote address in the (shared) TCP
 * host cache, keyed by destination address and FIB number.  Returns 0
 * for an unsupported address family; otherwise returns whatever
 * tcp_hc_getmtu() reports (which may also be 0 for a cache miss).
 */
uint32_t
sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		return (0);
	}
	return ((uint32_t)tcp_hc_getmtu(&inc));
}
7827 
/*
 * Set the association's main state (the bits covered by SCTP_STATE_MASK),
 * preserving any substate flags outside that mask.  new_state must
 * contain no substate bits (enforced by the KASSERT).  Entering any of
 * the three shutdown states clears the SHUTDOWN_PENDING substate, since
 * the pending shutdown is then in progress.
 */
void
sctp_set_state(struct sctp_tcb *stcb, int new_state)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
	    ("sctp_set_state: Can't set substate (new_state = %x)",
	    new_state));
	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
	}
#if defined(KDTRACE_HOOKS)
	/*
	 * Fire the DTrace state-change probe only when the masked state
	 * actually changed, and suppress the uninteresting EMPTY -> INUSE
	 * transition.
	 */
	if (((old_state & SCTP_STATE_MASK) != new_state) &&
	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
	    (new_state == SCTP_STATE_INUSE))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}
7852 
/*
 * OR additional substate flag bits into the association state without
 * touching the main state.  substate must contain no main-state bits
 * (enforced by the KASSERT) — the mirror image of sctp_set_state().
 */
void
sctp_add_substate(struct sctp_tcb *stcb, int substate)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((substate & SCTP_STATE_MASK) == 0,
	    ("sctp_add_substate: Can't set state (substate = %x)",
	    substate));
	stcb->asoc.state |= substate;
#if defined(KDTRACE_HOOKS)
	/*
	 * Fire the DTrace state-change probe only when ABOUT_TO_BE_FREED
	 * or SHUTDOWN_PENDING transitions from clear to set; re-adding an
	 * already-set flag is not reported.
	 */
	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
	    ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
	    ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}
7873