xref: /freebsd/sys/netinet/sctputil.c (revision d38c30c092828f4882ce13b08d0bd3fd6dc7afb5)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #include <netinet6/sctp6_var.h>
45 #endif
46 #include <netinet/sctp_header.h>
47 #include <netinet/sctp_output.h>
48 #include <netinet/sctp_uio.h>
49 #include <netinet/sctp_timer.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_auth.h>
52 #include <netinet/sctp_asconf.h>
53 #include <netinet/sctp_bsd_addr.h>
54 #include <netinet/sctp_kdtrace.h>
55 #if defined(INET6) || defined(INET)
56 #include <netinet/tcp_var.h>
57 #endif
58 #include <netinet/udp.h>
59 #include <netinet/udp_var.h>
60 #include <sys/proc.h>
61 #ifdef INET6
62 #include <netinet/icmp6.h>
63 #endif
64 
65 
66 #ifndef KTR_SCTP
67 #define KTR_SCTP KTR_SUBSYS
68 #endif
69 
70 extern const struct sctp_cc_functions sctp_cc_functions[];
71 extern const struct sctp_ss_functions sctp_ss_functions[];
72 
/*
 * Record a socket-buffer accounting event (SCTP_LOG_EVENT_SB) in the
 * local trace buffer: the sockbuf byte count, the association's sb_cc
 * (0 when no stcb), and the increment being applied.
 */
void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Zero the entry first: it is read back below through the
	 * x.misc overlay, which can cover bytes not written via x.sb.
	 * Matches rto_logging() and the other trace helpers here.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
95 
96 void
97 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
98 {
99 #if defined(SCTP_LOCAL_TRACE_BUF)
100 	struct sctp_cwnd_log sctp_clog;
101 
102 	sctp_clog.x.close.inp = (void *)inp;
103 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
104 	if (stcb) {
105 		sctp_clog.x.close.stcb = (void *)stcb;
106 		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
107 	} else {
108 		sctp_clog.x.close.stcb = 0;
109 		sctp_clog.x.close.state = 0;
110 	}
111 	sctp_clog.x.close.loc = loc;
112 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
113 	    SCTP_LOG_EVENT_CLOSE,
114 	    0,
115 	    sctp_clog.x.misc.log1,
116 	    sctp_clog.x.misc.log2,
117 	    sctp_clog.x.misc.log3,
118 	    sctp_clog.x.misc.log4);
119 #endif
120 }
121 
/*
 * Trace an RTT/RTO measurement (SCTP_LOG_EVENT_RTT) for a destination:
 * logs the net pointer and its smoothed RTT in milliseconds.
 */
void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log clog;

	memset(&clog, 0, sizeof(clog));
	clog.x.rto.net = (void *)net;
	clog.x.rto.rtt = net->rtt / 1000;	/* usec -> msec */
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT, from,
	    clog.x.misc.log1, clog.x.misc.log2,
	    clog.x.misc.log3, clog.x.misc.log4);
#endif
}
140 
141 void
142 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
143 {
144 #if defined(SCTP_LOCAL_TRACE_BUF)
145 	struct sctp_cwnd_log sctp_clog;
146 
147 	sctp_clog.x.strlog.stcb = stcb;
148 	sctp_clog.x.strlog.n_tsn = tsn;
149 	sctp_clog.x.strlog.n_sseq = sseq;
150 	sctp_clog.x.strlog.e_tsn = 0;
151 	sctp_clog.x.strlog.e_sseq = 0;
152 	sctp_clog.x.strlog.strm = stream;
153 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
154 	    SCTP_LOG_EVENT_STRM,
155 	    from,
156 	    sctp_clog.x.misc.log1,
157 	    sctp_clog.x.misc.log2,
158 	    sctp_clog.x.misc.log3,
159 	    sctp_clog.x.misc.log4);
160 #endif
161 }
162 
/*
 * Trace a Nagle decision (SCTP_LOG_EVENT_NAGLE): flight/queue counters
 * at the moment the send/hold decision ('action') was taken.
 */
void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first; x.misc may read bytes not written via x.nagle. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
183 
184 void
185 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
186 {
187 #if defined(SCTP_LOCAL_TRACE_BUF)
188 	struct sctp_cwnd_log sctp_clog;
189 
190 	sctp_clog.x.sack.cumack = cumack;
191 	sctp_clog.x.sack.oldcumack = old_cumack;
192 	sctp_clog.x.sack.tsn = tsn;
193 	sctp_clog.x.sack.numGaps = gaps;
194 	sctp_clog.x.sack.numDups = dups;
195 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
196 	    SCTP_LOG_EVENT_SACK,
197 	    from,
198 	    sctp_clog.x.misc.log1,
199 	    sctp_clog.x.misc.log2,
200 	    sctp_clog.x.misc.log3,
201 	    sctp_clog.x.misc.log4);
202 #endif
203 }
204 
205 void
206 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
207 {
208 #if defined(SCTP_LOCAL_TRACE_BUF)
209 	struct sctp_cwnd_log sctp_clog;
210 
211 	memset(&sctp_clog, 0, sizeof(sctp_clog));
212 	sctp_clog.x.map.base = map;
213 	sctp_clog.x.map.cum = cum;
214 	sctp_clog.x.map.high = high;
215 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
216 	    SCTP_LOG_EVENT_MAP,
217 	    from,
218 	    sctp_clog.x.misc.log1,
219 	    sctp_clog.x.misc.log2,
220 	    sctp_clog.x.misc.log3,
221 	    sctp_clog.x.misc.log4);
222 #endif
223 }
224 
225 void
226 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
227 {
228 #if defined(SCTP_LOCAL_TRACE_BUF)
229 	struct sctp_cwnd_log sctp_clog;
230 
231 	memset(&sctp_clog, 0, sizeof(sctp_clog));
232 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
233 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
234 	sctp_clog.x.fr.tsn = tsn;
235 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
236 	    SCTP_LOG_EVENT_FR,
237 	    from,
238 	    sctp_clog.x.misc.log1,
239 	    sctp_clog.x.misc.log2,
240 	    sctp_clog.x.misc.log3,
241 	    sctp_clog.x.misc.log4);
242 #endif
243 }
244 
245 #ifdef SCTP_MBUF_LOGGING
/*
 * Trace one mbuf (SCTP_LOG_EVENT_MBUF): pointer, flags, length, data
 * pointer, and external-storage base/refcount when present.
 */
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first; x.misc may read bytes not written via x.mb. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
272 
273 void
274 sctp_log_mbc(struct mbuf *m, int from)
275 {
276 	struct mbuf *mat;
277 
278 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
279 		sctp_log_mb(mat, from);
280 	}
281 }
282 #endif
283 
/*
 * Trace a stream-queue delivery event (SCTP_LOG_EVENT_STRM) for
 * 'control', optionally recording the neighbouring entry 'poschk'.
 * A NULL control is logged to the console and ignored.
 */
void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	/* Zero first; x.misc may read bytes not written via x.strlog. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
314 
315 void
316 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
317 {
318 #if defined(SCTP_LOCAL_TRACE_BUF)
319 	struct sctp_cwnd_log sctp_clog;
320 
321 	sctp_clog.x.cwnd.net = net;
322 	if (stcb->asoc.send_queue_cnt > 255)
323 		sctp_clog.x.cwnd.cnt_in_send = 255;
324 	else
325 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
326 	if (stcb->asoc.stream_queue_cnt > 255)
327 		sctp_clog.x.cwnd.cnt_in_str = 255;
328 	else
329 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
330 
331 	if (net) {
332 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
333 		sctp_clog.x.cwnd.inflight = net->flight_size;
334 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
335 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
336 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
337 	}
338 	if (SCTP_CWNDLOG_PRESEND == from) {
339 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
340 	}
341 	sctp_clog.x.cwnd.cwnd_augment = augment;
342 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
343 	    SCTP_LOG_EVENT_CWND,
344 	    from,
345 	    sctp_clog.x.misc.log1,
346 	    sctp_clog.x.misc.log2,
347 	    sctp_clog.x.misc.log3,
348 	    sctp_clog.x.misc.log4);
349 #endif
350 }
351 
/*
 * Trace the ownership state of every lock relevant to an endpoint /
 * association (SCTP_LOG_LOCK_EVENT).  Locks whose owner cannot be
 * queried (NULL inp/stcb/socket) are recorded as SCTP_LOCK_UNKNOWN.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	/* Global endpoint-list lock (write-owned check). */
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both query
		 * so_rcv.sb_mtx — presumably intentional (the socket lock
		 * maps to the receive-buffer mutex here); confirm.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
397 
398 void
399 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
400 {
401 #if defined(SCTP_LOCAL_TRACE_BUF)
402 	struct sctp_cwnd_log sctp_clog;
403 
404 	memset(&sctp_clog, 0, sizeof(sctp_clog));
405 	sctp_clog.x.cwnd.net = net;
406 	sctp_clog.x.cwnd.cwnd_new_value = error;
407 	sctp_clog.x.cwnd.inflight = net->flight_size;
408 	sctp_clog.x.cwnd.cwnd_augment = burst;
409 	if (stcb->asoc.send_queue_cnt > 255)
410 		sctp_clog.x.cwnd.cnt_in_send = 255;
411 	else
412 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
413 	if (stcb->asoc.stream_queue_cnt > 255)
414 		sctp_clog.x.cwnd.cnt_in_str = 255;
415 	else
416 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
417 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
418 	    SCTP_LOG_EVENT_MAXBURST,
419 	    from,
420 	    sctp_clog.x.misc.log1,
421 	    sctp_clog.x.misc.log2,
422 	    sctp_clog.x.misc.log3,
423 	    sctp_clog.x.misc.log4);
424 #endif
425 }
426 
427 void
428 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
429 {
430 #if defined(SCTP_LOCAL_TRACE_BUF)
431 	struct sctp_cwnd_log sctp_clog;
432 
433 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
434 	sctp_clog.x.rwnd.send_size = snd_size;
435 	sctp_clog.x.rwnd.overhead = overhead;
436 	sctp_clog.x.rwnd.new_rwnd = 0;
437 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
438 	    SCTP_LOG_EVENT_RWND,
439 	    from,
440 	    sctp_clog.x.misc.log1,
441 	    sctp_clog.x.misc.log2,
442 	    sctp_clog.x.misc.log3,
443 	    sctp_clog.x.misc.log4);
444 #endif
445 }
446 
447 void
448 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
449 {
450 #if defined(SCTP_LOCAL_TRACE_BUF)
451 	struct sctp_cwnd_log sctp_clog;
452 
453 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
454 	sctp_clog.x.rwnd.send_size = flight_size;
455 	sctp_clog.x.rwnd.overhead = overhead;
456 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
457 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
458 	    SCTP_LOG_EVENT_RWND,
459 	    from,
460 	    sctp_clog.x.misc.log1,
461 	    sctp_clog.x.misc.log2,
462 	    sctp_clog.x.misc.log3,
463 	    sctp_clog.x.misc.log4);
464 #endif
465 }
466 
467 #ifdef SCTP_MBCNT_LOGGING
468 static void
469 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
470 {
471 #if defined(SCTP_LOCAL_TRACE_BUF)
472 	struct sctp_cwnd_log sctp_clog;
473 
474 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
475 	sctp_clog.x.mbcnt.size_change = book;
476 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
477 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
478 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
479 	    SCTP_LOG_EVENT_MBCNT,
480 	    from,
481 	    sctp_clog.x.misc.log1,
482 	    sctp_clog.x.misc.log2,
483 	    sctp_clog.x.misc.log3,
484 	    sctp_clog.x.misc.log4);
485 #endif
486 }
487 #endif
488 
/*
 * Trace four caller-chosen 32-bit values as a generic
 * SCTP_LOG_MISC_EVENT entry; no sctp_cwnd_log staging needed.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
#endif
}
499 
500 void
501 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
502 {
503 #if defined(SCTP_LOCAL_TRACE_BUF)
504 	struct sctp_cwnd_log sctp_clog;
505 
506 	sctp_clog.x.wake.stcb = (void *)stcb;
507 	sctp_clog.x.wake.wake_cnt = wake_cnt;
508 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
509 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
510 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
511 
512 	if (stcb->asoc.stream_queue_cnt < 0xff)
513 		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
514 	else
515 		sctp_clog.x.wake.stream_qcnt = 0xff;
516 
517 	if (stcb->asoc.chunks_on_out_queue < 0xff)
518 		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
519 	else
520 		sctp_clog.x.wake.chunks_on_oque = 0xff;
521 
522 	sctp_clog.x.wake.sctpflags = 0;
523 	/* set in the defered mode stuff */
524 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
525 		sctp_clog.x.wake.sctpflags |= 1;
526 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
527 		sctp_clog.x.wake.sctpflags |= 2;
528 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
529 		sctp_clog.x.wake.sctpflags |= 4;
530 	/* what about the sb */
531 	if (stcb->sctp_socket) {
532 		struct socket *so = stcb->sctp_socket;
533 
534 		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
535 	} else {
536 		sctp_clog.x.wake.sbflags = 0xff;
537 	}
538 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
539 	    SCTP_LOG_EVENT_WAKE,
540 	    from,
541 	    sctp_clog.x.misc.log1,
542 	    sctp_clog.x.misc.log2,
543 	    sctp_clog.x.misc.log3,
544 	    sctp_clog.x.misc.log4);
545 #endif
546 }
547 
548 void
549 sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
550 {
551 #if defined(SCTP_LOCAL_TRACE_BUF)
552 	struct sctp_cwnd_log sctp_clog;
553 
554 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
555 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
556 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
557 	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
558 	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
559 	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
560 	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
561 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
562 	    SCTP_LOG_EVENT_BLOCK,
563 	    from,
564 	    sctp_clog.x.misc.log1,
565 	    sctp_clog.x.misc.log2,
566 	    sctp_clog.x.misc.log3,
567 	    sctp_clog.x.misc.log4);
568 #endif
569 }
570 
/*
 * Stub for the stat-log getsockopt path: trace entries are retrieved
 * via ktrdump(8) instead, so there is nothing to copy out here.
 * Always returns 0 (success).
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
577 
578 #ifdef SCTP_AUDITING_ENABLED
/* Circular audit trail: SCTP_AUDIT_SIZE two-byte records (tag, detail). */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
/* Next slot to write; wraps back to 0 at SCTP_AUDIT_SIZE. */
static int sctp_audit_indx = 0;
581 
582 static
583 void
584 sctp_print_audit_report(void)
585 {
586 	int i;
587 	int cnt;
588 
589 	cnt = 0;
590 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
591 		if ((sctp_audit_data[i][0] == 0xe0) &&
592 		    (sctp_audit_data[i][1] == 0x01)) {
593 			cnt = 0;
594 			SCTP_PRINTF("\n");
595 		} else if (sctp_audit_data[i][0] == 0xf0) {
596 			cnt = 0;
597 			SCTP_PRINTF("\n");
598 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
599 		    (sctp_audit_data[i][1] == 0x01)) {
600 			SCTP_PRINTF("\n");
601 			cnt = 0;
602 		}
603 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
604 		    (uint32_t)sctp_audit_data[i][1]);
605 		cnt++;
606 		if ((cnt % 14) == 0)
607 			SCTP_PRINTF("\n");
608 	}
609 	for (i = 0; i < sctp_audit_indx; i++) {
610 		if ((sctp_audit_data[i][0] == 0xe0) &&
611 		    (sctp_audit_data[i][1] == 0x01)) {
612 			cnt = 0;
613 			SCTP_PRINTF("\n");
614 		} else if (sctp_audit_data[i][0] == 0xf0) {
615 			cnt = 0;
616 			SCTP_PRINTF("\n");
617 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
618 		    (sctp_audit_data[i][1] == 0x01)) {
619 			SCTP_PRINTF("\n");
620 			cnt = 0;
621 		}
622 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
623 		    (uint32_t)sctp_audit_data[i][1]);
624 		cnt++;
625 		if ((cnt % 14) == 0)
626 			SCTP_PRINTF("\n");
627 	}
628 	SCTP_PRINTF("\n");
629 }
630 
/*
 * Record an audit event and cross-check the association's bookkeeping:
 * the retransmit count, total flight bytes/chunks against the sent
 * queue, and each net's flight_size against its queued chunks.  Any
 * mismatch is logged (0xAF records), corrected in place, and triggers
 * a full audit-report dump.  The 'net' parameter is currently unused.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* 0xAA: audit entry marker, low byte carries the caller's id. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: bailing out, no endpoint. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: bailing out, no association. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: snapshot of the current retransmit count (low byte). */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/* Recount retransmits and in-flight bytes/chunks from scratch. */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: retransmit count mismatch — correct it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: total flight bytes mismatch — correct it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: in-flight chunk count mismatch — correct it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Now check that the per-net flight sizes sum to the total. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA3: per-net flight sums disagree — rebuild each. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
760 
761 void
762 sctp_audit_log(uint8_t ev, uint8_t fd)
763 {
764 
765 	sctp_audit_data[sctp_audit_indx][0] = ev;
766 	sctp_audit_data[sctp_audit_indx][1] = fd;
767 	sctp_audit_indx++;
768 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
769 		sctp_audit_indx = 0;
770 	}
771 }
772 
773 #endif
774 
775 /*
776  * sctp_stop_timers_for_shutdown() should be called
777  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
778  * state to make sure that all timers are stopped.
779  */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;

	/*
	 * Stop the association-wide timers that must not fire once a
	 * SHUTDOWN/SHUTDOWN-ACK has been sent; each call carries a
	 * distinct SCTP_LOC_* code identifying this stop site.
	 */
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
	/* Per-destination timers: path MTU raise and heartbeat. */
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
	}
}
803 
/*
 * Stop every timer associated with an association: all association-
 * wide timers (the ASOCKILL timer only when requested, since the
 * caller may be tearing down from within that very timer) and every
 * per-destination timer.  Each stop carries its own SCTP_LOC_* code.
 */
void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
	if (stop_assoc_kill_timer) {
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
	}
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
	/* Mobility adaptation */
	sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
	/* Per-destination timers. */
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
		sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
	}
}
845 
846 /*
847  * A list of sizes based on typical mtu's, used only if next hop size not
848  * returned. These values MUST be multiples of 4 and MUST be ordered.
849  */
/*
 * Ascending table of common link MTUs (each a multiple of 4, as the
 * accessors below assert).  Entries presumably correspond to typical
 * media — e.g. 576 (IPv4 minimum reassembly), 1492/1500 (Ethernet),
 * 65532 (near-maximum IP datagram) — TODO confirm individual origins.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1004,
	1492,
	1500,
	1536,
	2000,
	2048,
	4352,
	4464,
	8166,
	17912,
	32000,
	65532
};
870 
871 /*
872  * Return the largest MTU in sctp_mtu_sizes smaller than val.
873  * If val is smaller than the minimum, just return the largest
874  * multiple of 4 smaller or equal to val.
875  * Ensure that the result is a multiple of 4.
876  */
877 uint32_t
878 sctp_get_prev_mtu(uint32_t val)
879 {
880 	uint32_t i;
881 
882 	val &= 0xfffffffc;
883 	if (val <= sctp_mtu_sizes[0]) {
884 		return (val);
885 	}
886 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
887 		if (val <= sctp_mtu_sizes[i]) {
888 			break;
889 		}
890 	}
891 	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
892 	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
893 	return (sctp_mtu_sizes[i - 1]);
894 }
895 
896 /*
897  * Return the smallest MTU in sctp_mtu_sizes larger than val.
898  * If val is larger than the maximum, just return the largest multiple of 4 smaller
899  * or equal to val.
900  * Ensure that the result is a multiple of 4.
901  */
902 uint32_t
903 sctp_get_next_mtu(uint32_t val)
904 {
905 	/* select another MTU that is just bigger than this one */
906 	uint32_t i;
907 
908 	val &= 0xfffffffc;
909 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
910 		if (val < sctp_mtu_sizes[i]) {
911 			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
912 			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
913 			return (sctp_mtu_sizes[i]);
914 		}
915 	}
916 	return (val);
917 }
918 
/*
 * Refill the endpoint's random_store by HMAC-ing the seed random
 * numbers with a monotonically increasing counter, then reset the
 * read offset to 0.  Deliberately lock-free; see comment below.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}
937 
938 uint32_t
939 sctp_select_initial_TSN(struct sctp_pcb *inp)
940 {
941 	/*
942 	 * A true implementation should use random selection process to get
943 	 * the initial stream sequence number, using RFC1750 as a good
944 	 * guideline
945 	 */
946 	uint32_t x, *xp;
947 	uint8_t *p;
948 	int store_at, new_store;
949 
950 	if (inp->initial_sequence_debug != 0) {
951 		uint32_t ret;
952 
953 		ret = inp->initial_sequence_debug;
954 		inp->initial_sequence_debug++;
955 		return (ret);
956 	}
957 retry:
958 	store_at = inp->store_at;
959 	new_store = store_at + sizeof(uint32_t);
960 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
961 		new_store = 0;
962 	}
963 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
964 		goto retry;
965 	}
966 	if (new_store == 0) {
967 		/* Refill the random store */
968 		sctp_fill_random_store(inp);
969 	}
970 	p = &inp->random_store[store_at];
971 	xp = (uint32_t *)p;
972 	x = *xp;
973 	return (x);
974 }
975 
976 uint32_t
977 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
978 {
979 	uint32_t x;
980 	struct timeval now;
981 
982 	if (check) {
983 		(void)SCTP_GETTIME_TIMEVAL(&now);
984 	}
985 	for (;;) {
986 		x = sctp_select_initial_TSN(&inp->sctp_ep);
987 		if (x == 0) {
988 			/* we never use 0 */
989 			continue;
990 		}
991 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
992 			break;
993 		}
994 	}
995 	return (x);
996 }
997 
998 int32_t
999 sctp_map_assoc_state(int kernel_state)
1000 {
1001 	int32_t user_state;
1002 
1003 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
1004 		user_state = SCTP_CLOSED;
1005 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
1006 		user_state = SCTP_SHUTDOWN_PENDING;
1007 	} else {
1008 		switch (kernel_state & SCTP_STATE_MASK) {
1009 		case SCTP_STATE_EMPTY:
1010 			user_state = SCTP_CLOSED;
1011 			break;
1012 		case SCTP_STATE_INUSE:
1013 			user_state = SCTP_CLOSED;
1014 			break;
1015 		case SCTP_STATE_COOKIE_WAIT:
1016 			user_state = SCTP_COOKIE_WAIT;
1017 			break;
1018 		case SCTP_STATE_COOKIE_ECHOED:
1019 			user_state = SCTP_COOKIE_ECHOED;
1020 			break;
1021 		case SCTP_STATE_OPEN:
1022 			user_state = SCTP_ESTABLISHED;
1023 			break;
1024 		case SCTP_STATE_SHUTDOWN_SENT:
1025 			user_state = SCTP_SHUTDOWN_SENT;
1026 			break;
1027 		case SCTP_STATE_SHUTDOWN_RECEIVED:
1028 			user_state = SCTP_SHUTDOWN_RECEIVED;
1029 			break;
1030 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
1031 			user_state = SCTP_SHUTDOWN_ACK_SENT;
1032 			break;
1033 		default:
1034 			user_state = SCTP_CLOSED;
1035 			break;
1036 		}
1037 	}
1038 	return (user_state);
1039 }
1040 
1041 int
1042 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1043     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
1044 {
1045 	struct sctp_association *asoc;
1046 
1047 	/*
1048 	 * Anything set to zero is taken care of by the allocation routine's
1049 	 * bzero
1050 	 */
1051 
1052 	/*
1053 	 * Up front select what scoping to apply on addresses I tell my peer
1054 	 * Not sure what to do with these right now, we will need to come up
1055 	 * with a way to set them. We may need to pass them through from the
1056 	 * caller in the sctp_aloc_assoc() function.
1057 	 */
1058 	int i;
1059 #if defined(SCTP_DETAILED_STR_STATS)
1060 	int j;
1061 #endif
1062 
1063 	asoc = &stcb->asoc;
1064 	/* init all variables to a known value. */
1065 	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
1066 	asoc->max_burst = inp->sctp_ep.max_burst;
1067 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
1068 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
1069 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
1070 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
1071 	asoc->ecn_supported = inp->ecn_supported;
1072 	asoc->prsctp_supported = inp->prsctp_supported;
1073 	asoc->idata_supported = inp->idata_supported;
1074 	asoc->auth_supported = inp->auth_supported;
1075 	asoc->asconf_supported = inp->asconf_supported;
1076 	asoc->reconfig_supported = inp->reconfig_supported;
1077 	asoc->nrsack_supported = inp->nrsack_supported;
1078 	asoc->pktdrop_supported = inp->pktdrop_supported;
1079 	asoc->idata_supported = inp->idata_supported;
1080 	asoc->sctp_cmt_pf = (uint8_t)0;
1081 	asoc->sctp_frag_point = inp->sctp_frag_point;
1082 	asoc->sctp_features = inp->sctp_features;
1083 	asoc->default_dscp = inp->sctp_ep.default_dscp;
1084 	asoc->max_cwnd = inp->max_cwnd;
1085 #ifdef INET6
1086 	if (inp->sctp_ep.default_flowlabel) {
1087 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
1088 	} else {
1089 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
1090 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
1091 			asoc->default_flowlabel &= 0x000fffff;
1092 			asoc->default_flowlabel |= 0x80000000;
1093 		} else {
1094 			asoc->default_flowlabel = 0;
1095 		}
1096 	}
1097 #endif
1098 	asoc->sb_send_resv = 0;
1099 	if (override_tag) {
1100 		asoc->my_vtag = override_tag;
1101 	} else {
1102 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1103 	}
1104 	/* Get the nonce tags */
1105 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1106 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1107 	asoc->vrf_id = vrf_id;
1108 
1109 #ifdef SCTP_ASOCLOG_OF_TSNS
1110 	asoc->tsn_in_at = 0;
1111 	asoc->tsn_out_at = 0;
1112 	asoc->tsn_in_wrapped = 0;
1113 	asoc->tsn_out_wrapped = 0;
1114 	asoc->cumack_log_at = 0;
1115 	asoc->cumack_log_atsnt = 0;
1116 #endif
1117 #ifdef SCTP_FS_SPEC_LOG
1118 	asoc->fs_index = 0;
1119 #endif
1120 	asoc->refcnt = 0;
1121 	asoc->assoc_up_sent = 0;
1122 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1123 	    sctp_select_initial_TSN(&inp->sctp_ep);
1124 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1125 	/* we are optimisitic here */
1126 	asoc->peer_supports_nat = 0;
1127 	asoc->sent_queue_retran_cnt = 0;
1128 
1129 	/* for CMT */
1130 	asoc->last_net_cmt_send_started = NULL;
1131 
1132 	/* This will need to be adjusted */
1133 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1134 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1135 	asoc->asconf_seq_in = asoc->last_acked_seq;
1136 
1137 	/* here we are different, we hold the next one we expect */
1138 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1139 
1140 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1141 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1142 
1143 	asoc->default_mtu = inp->sctp_ep.default_mtu;
1144 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1145 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1146 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1147 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1148 	asoc->free_chunk_cnt = 0;
1149 
1150 	asoc->iam_blocking = 0;
1151 	asoc->context = inp->sctp_context;
1152 	asoc->local_strreset_support = inp->local_strreset_support;
1153 	asoc->def_send = inp->def_send;
1154 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1155 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1156 	asoc->pr_sctp_cnt = 0;
1157 	asoc->total_output_queue_size = 0;
1158 
1159 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1160 		asoc->scope.ipv6_addr_legal = 1;
1161 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1162 			asoc->scope.ipv4_addr_legal = 1;
1163 		} else {
1164 			asoc->scope.ipv4_addr_legal = 0;
1165 		}
1166 	} else {
1167 		asoc->scope.ipv6_addr_legal = 0;
1168 		asoc->scope.ipv4_addr_legal = 1;
1169 	}
1170 
1171 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1172 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1173 
1174 	asoc->smallest_mtu = inp->sctp_frag_point;
1175 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1176 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1177 
1178 	asoc->stream_locked_on = 0;
1179 	asoc->ecn_echo_cnt_onq = 0;
1180 	asoc->stream_locked = 0;
1181 
1182 	asoc->send_sack = 1;
1183 
1184 	LIST_INIT(&asoc->sctp_restricted_addrs);
1185 
1186 	TAILQ_INIT(&asoc->nets);
1187 	TAILQ_INIT(&asoc->pending_reply_queue);
1188 	TAILQ_INIT(&asoc->asconf_ack_sent);
1189 	/* Setup to fill the hb random cache at first HB */
1190 	asoc->hb_random_idx = 4;
1191 
1192 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1193 
1194 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1195 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1196 
1197 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1198 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1199 
1200 	/*
1201 	 * Now the stream parameters, here we allocate space for all streams
1202 	 * that we request by default.
1203 	 */
1204 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1205 	    o_strms;
1206 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1207 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1208 	    SCTP_M_STRMO);
1209 	if (asoc->strmout == NULL) {
1210 		/* big trouble no memory */
1211 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1212 		return (ENOMEM);
1213 	}
1214 	for (i = 0; i < asoc->streamoutcnt; i++) {
1215 		/*
1216 		 * inbound side must be set to 0xffff, also NOTE when we get
1217 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1218 		 * count (streamoutcnt) but first check if we sent to any of
1219 		 * the upper streams that were dropped (if some were). Those
1220 		 * that were dropped must be notified to the upper layer as
1221 		 * failed to send.
1222 		 */
1223 		asoc->strmout[i].next_mid_ordered = 0;
1224 		asoc->strmout[i].next_mid_unordered = 0;
1225 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1226 		asoc->strmout[i].chunks_on_queues = 0;
1227 #if defined(SCTP_DETAILED_STR_STATS)
1228 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1229 			asoc->strmout[i].abandoned_sent[j] = 0;
1230 			asoc->strmout[i].abandoned_unsent[j] = 0;
1231 		}
1232 #else
1233 		asoc->strmout[i].abandoned_sent[0] = 0;
1234 		asoc->strmout[i].abandoned_unsent[0] = 0;
1235 #endif
1236 		asoc->strmout[i].sid = i;
1237 		asoc->strmout[i].last_msg_incomplete = 0;
1238 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1239 		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1240 	}
1241 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1242 
1243 	/* Now the mapping array */
1244 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1245 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1246 	    SCTP_M_MAP);
1247 	if (asoc->mapping_array == NULL) {
1248 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1249 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1250 		return (ENOMEM);
1251 	}
1252 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1253 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1254 	    SCTP_M_MAP);
1255 	if (asoc->nr_mapping_array == NULL) {
1256 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1257 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1258 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1259 		return (ENOMEM);
1260 	}
1261 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1262 
1263 	/* Now the init of the other outqueues */
1264 	TAILQ_INIT(&asoc->free_chunks);
1265 	TAILQ_INIT(&asoc->control_send_queue);
1266 	TAILQ_INIT(&asoc->asconf_send_queue);
1267 	TAILQ_INIT(&asoc->send_queue);
1268 	TAILQ_INIT(&asoc->sent_queue);
1269 	TAILQ_INIT(&asoc->resetHead);
1270 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1271 	TAILQ_INIT(&asoc->asconf_queue);
1272 	/* authentication fields */
1273 	asoc->authinfo.random = NULL;
1274 	asoc->authinfo.active_keyid = 0;
1275 	asoc->authinfo.assoc_key = NULL;
1276 	asoc->authinfo.assoc_keyid = 0;
1277 	asoc->authinfo.recv_key = NULL;
1278 	asoc->authinfo.recv_keyid = 0;
1279 	LIST_INIT(&asoc->shared_keys);
1280 	asoc->marked_retrans = 0;
1281 	asoc->port = inp->sctp_ep.port;
1282 	asoc->timoinit = 0;
1283 	asoc->timodata = 0;
1284 	asoc->timosack = 0;
1285 	asoc->timoshutdown = 0;
1286 	asoc->timoheartbeat = 0;
1287 	asoc->timocookie = 0;
1288 	asoc->timoshutdownack = 0;
1289 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1290 	asoc->discontinuity_time = asoc->start_time;
1291 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1292 		asoc->abandoned_unsent[i] = 0;
1293 		asoc->abandoned_sent[i] = 0;
1294 	}
1295 	/*
1296 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1297 	 * freed later when the association is freed.
1298 	 */
1299 	return (0);
1300 }
1301 
1302 void
1303 sctp_print_mapping_array(struct sctp_association *asoc)
1304 {
1305 	unsigned int i, limit;
1306 
1307 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1308 	    asoc->mapping_array_size,
1309 	    asoc->mapping_array_base_tsn,
1310 	    asoc->cumulative_tsn,
1311 	    asoc->highest_tsn_inside_map,
1312 	    asoc->highest_tsn_inside_nr_map);
1313 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1314 		if (asoc->mapping_array[limit - 1] != 0) {
1315 			break;
1316 		}
1317 	}
1318 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1319 	for (i = 0; i < limit; i++) {
1320 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1321 	}
1322 	if (limit % 16)
1323 		SCTP_PRINTF("\n");
1324 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1325 		if (asoc->nr_mapping_array[limit - 1]) {
1326 			break;
1327 		}
1328 	}
1329 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1330 	for (i = 0; i < limit; i++) {
1331 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1332 	}
1333 	if (limit % 16)
1334 		SCTP_PRINTF("\n");
1335 }
1336 
1337 int
1338 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1339 {
1340 	/* mapping array needs to grow */
1341 	uint8_t *new_array1, *new_array2;
1342 	uint32_t new_size;
1343 
1344 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1345 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1346 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1347 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1348 		/* can't get more, forget it */
1349 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1350 		if (new_array1) {
1351 			SCTP_FREE(new_array1, SCTP_M_MAP);
1352 		}
1353 		if (new_array2) {
1354 			SCTP_FREE(new_array2, SCTP_M_MAP);
1355 		}
1356 		return (-1);
1357 	}
1358 	memset(new_array1, 0, new_size);
1359 	memset(new_array2, 0, new_size);
1360 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1361 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1362 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1363 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1364 	asoc->mapping_array = new_array1;
1365 	asoc->nr_mapping_array = new_array2;
1366 	asoc->mapping_array_size = new_size;
1367 	return (0);
1368 }
1369 
1370 
/*
 * Core of the association iterator: walk endpoints (and each endpoint's
 * associations), invoking the iterator's callbacks on matching ones.
 * Runs with the INP-info read lock and the iterator lock held; both are
 * dropped (and 'it' is freed) before returning.  Periodically drops and
 * re-acquires the locks to let other threads make progress.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		/* Drop the reference taken when the iterator was queued. */
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		/* Common exit: unlock, run the at-end callback, free 'it'. */
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* The INP read lock is already held from above. */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		/* Per-endpoint callback runs once per endpoint. */
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Callback asked to skip, or no associations here. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				/* Someone asked this iterator to stop. */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Re-acquire the locks and drop the hold refs. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}

		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1521 
1522 void
1523 sctp_iterator_worker(void)
1524 {
1525 	struct sctp_iterator *it;
1526 
1527 	/* This function is called with the WQ lock in place */
1528 	sctp_it_ctl.iterator_running = 1;
1529 	while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
1530 		/* now lets work on this one */
1531 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1532 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1533 		CURVNET_SET(it->vn);
1534 		sctp_iterator_work(it);
1535 		CURVNET_RESTORE();
1536 		SCTP_IPI_ITERATOR_WQ_LOCK();
1537 		/* sa_ignore FREED_MEMORY */
1538 	}
1539 	sctp_it_ctl.iterator_running = 0;
1540 	return;
1541 }
1542 
1543 
1544 static void
1545 sctp_handle_addr_wq(void)
1546 {
1547 	/* deal with the ADDR wq from the rtsock calls */
1548 	struct sctp_laddr *wi, *nwi;
1549 	struct sctp_asconf_iterator *asc;
1550 
1551 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1552 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1553 	if (asc == NULL) {
1554 		/* Try later, no memory */
1555 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1556 		    (struct sctp_inpcb *)NULL,
1557 		    (struct sctp_tcb *)NULL,
1558 		    (struct sctp_nets *)NULL);
1559 		return;
1560 	}
1561 	LIST_INIT(&asc->list_of_work);
1562 	asc->cnt = 0;
1563 
1564 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1565 		LIST_REMOVE(wi, sctp_nxt_addr);
1566 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1567 		asc->cnt++;
1568 	}
1569 
1570 	if (asc->cnt == 0) {
1571 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1572 	} else {
1573 		int ret;
1574 
1575 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1576 		    sctp_asconf_iterator_stcb,
1577 		    NULL,	/* No ep end for boundall */
1578 		    SCTP_PCB_FLAGS_BOUNDALL,
1579 		    SCTP_PCB_ANY_FEATURES,
1580 		    SCTP_ASOC_ANY_STATE,
1581 		    (void *)asc, 0,
1582 		    sctp_asconf_iterator_end, NULL, 0);
1583 		if (ret) {
1584 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1585 			/*
1586 			 * Freeing if we are stopping or put back on the
1587 			 * addr_wq.
1588 			 */
1589 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1590 				sctp_asconf_iterator_end(asc, 0);
1591 			} else {
1592 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1593 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1594 				}
1595 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1596 			}
1597 		}
1598 	}
1599 }
1600 
1601 void
1602 sctp_timeout_handler(void *t)
1603 {
1604 	struct sctp_inpcb *inp;
1605 	struct sctp_tcb *stcb;
1606 	struct sctp_nets *net;
1607 	struct sctp_timer *tmr;
1608 	struct mbuf *op_err;
1609 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1610 	struct socket *so;
1611 #endif
1612 	int did_output;
1613 	int type;
1614 
1615 	tmr = (struct sctp_timer *)t;
1616 	inp = (struct sctp_inpcb *)tmr->ep;
1617 	stcb = (struct sctp_tcb *)tmr->tcb;
1618 	net = (struct sctp_nets *)tmr->net;
1619 	CURVNET_SET((struct vnet *)tmr->vnet);
1620 	did_output = 1;
1621 
1622 #ifdef SCTP_AUDITING_ENABLED
1623 	sctp_audit_log(0xF0, (uint8_t)tmr->type);
1624 	sctp_auditing(3, inp, stcb, net);
1625 #endif
1626 
1627 	/* sanity checks... */
1628 	if (tmr->self != (void *)tmr) {
1629 		/*
1630 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1631 		 * (void *)tmr);
1632 		 */
1633 		CURVNET_RESTORE();
1634 		return;
1635 	}
1636 	tmr->stopped_from = 0xa001;
1637 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1638 		/*
1639 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1640 		 * tmr->type);
1641 		 */
1642 		CURVNET_RESTORE();
1643 		return;
1644 	}
1645 	tmr->stopped_from = 0xa002;
1646 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1647 		CURVNET_RESTORE();
1648 		return;
1649 	}
1650 	/* if this is an iterator timeout, get the struct and clear inp */
1651 	tmr->stopped_from = 0xa003;
1652 	if (inp) {
1653 		SCTP_INP_INCR_REF(inp);
1654 		if ((inp->sctp_socket == NULL) &&
1655 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1656 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1657 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1658 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1659 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1660 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1661 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1662 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1663 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))) {
1664 			SCTP_INP_DECR_REF(inp);
1665 			CURVNET_RESTORE();
1666 			return;
1667 		}
1668 	}
1669 	tmr->stopped_from = 0xa004;
1670 	if (stcb) {
1671 		atomic_add_int(&stcb->asoc.refcnt, 1);
1672 		if (stcb->asoc.state == 0) {
1673 			atomic_add_int(&stcb->asoc.refcnt, -1);
1674 			if (inp) {
1675 				SCTP_INP_DECR_REF(inp);
1676 			}
1677 			CURVNET_RESTORE();
1678 			return;
1679 		}
1680 	}
1681 	type = tmr->type;
1682 	tmr->stopped_from = 0xa005;
1683 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
1684 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1685 		if (inp) {
1686 			SCTP_INP_DECR_REF(inp);
1687 		}
1688 		if (stcb) {
1689 			atomic_add_int(&stcb->asoc.refcnt, -1);
1690 		}
1691 		CURVNET_RESTORE();
1692 		return;
1693 	}
1694 	tmr->stopped_from = 0xa006;
1695 
1696 	if (stcb) {
1697 		SCTP_TCB_LOCK(stcb);
1698 		atomic_add_int(&stcb->asoc.refcnt, -1);
1699 		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1700 		    ((stcb->asoc.state == 0) ||
1701 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1702 			SCTP_TCB_UNLOCK(stcb);
1703 			if (inp) {
1704 				SCTP_INP_DECR_REF(inp);
1705 			}
1706 			CURVNET_RESTORE();
1707 			return;
1708 		}
1709 	} else if (inp != NULL) {
1710 		if (type != SCTP_TIMER_TYPE_INPKILL) {
1711 			SCTP_INP_WLOCK(inp);
1712 		}
1713 	} else {
1714 		SCTP_WQ_ADDR_LOCK();
1715 	}
1716 	/* record in stopped what t-o occurred */
1717 	tmr->stopped_from = type;
1718 
1719 	/* mark as being serviced now */
1720 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1721 		/*
1722 		 * Callout has been rescheduled.
1723 		 */
1724 		goto get_out;
1725 	}
1726 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1727 		/*
1728 		 * Not active, so no action.
1729 		 */
1730 		goto get_out;
1731 	}
1732 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1733 
1734 	/* call the handler for the appropriate timer type */
1735 	switch (type) {
1736 	case SCTP_TIMER_TYPE_ADDR_WQ:
1737 		sctp_handle_addr_wq();
1738 		break;
1739 	case SCTP_TIMER_TYPE_SEND:
1740 		if ((stcb == NULL) || (inp == NULL)) {
1741 			break;
1742 		}
1743 		SCTP_STAT_INCR(sctps_timodata);
1744 		stcb->asoc.timodata++;
1745 		stcb->asoc.num_send_timers_up--;
1746 		if (stcb->asoc.num_send_timers_up < 0) {
1747 			stcb->asoc.num_send_timers_up = 0;
1748 		}
1749 		SCTP_TCB_LOCK_ASSERT(stcb);
1750 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1751 			/* no need to unlock on tcb its gone */
1752 
1753 			goto out_decr;
1754 		}
1755 		SCTP_TCB_LOCK_ASSERT(stcb);
1756 #ifdef SCTP_AUDITING_ENABLED
1757 		sctp_auditing(4, inp, stcb, net);
1758 #endif
1759 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1760 		if ((stcb->asoc.num_send_timers_up == 0) &&
1761 		    (stcb->asoc.sent_queue_cnt > 0)) {
1762 			struct sctp_tmit_chunk *chk;
1763 
1764 			/*
1765 			 * safeguard. If there on some on the sent queue
1766 			 * somewhere but no timers running something is
1767 			 * wrong... so we start a timer on the first chunk
1768 			 * on the send queue on whatever net it is sent to.
1769 			 */
1770 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1771 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1772 			    chk->whoTo);
1773 		}
1774 		break;
1775 	case SCTP_TIMER_TYPE_INIT:
1776 		if ((stcb == NULL) || (inp == NULL)) {
1777 			break;
1778 		}
1779 		SCTP_STAT_INCR(sctps_timoinit);
1780 		stcb->asoc.timoinit++;
1781 		if (sctp_t1init_timer(inp, stcb, net)) {
1782 			/* no need to unlock on tcb its gone */
1783 			goto out_decr;
1784 		}
1785 		/* We do output but not here */
1786 		did_output = 0;
1787 		break;
1788 	case SCTP_TIMER_TYPE_RECV:
1789 		if ((stcb == NULL) || (inp == NULL)) {
1790 			break;
1791 		}
1792 		SCTP_STAT_INCR(sctps_timosack);
1793 		stcb->asoc.timosack++;
1794 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1795 #ifdef SCTP_AUDITING_ENABLED
1796 		sctp_auditing(4, inp, stcb, net);
1797 #endif
1798 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1799 		break;
1800 	case SCTP_TIMER_TYPE_SHUTDOWN:
1801 		if ((stcb == NULL) || (inp == NULL)) {
1802 			break;
1803 		}
1804 		if (sctp_shutdown_timer(inp, stcb, net)) {
1805 			/* no need to unlock on tcb its gone */
1806 			goto out_decr;
1807 		}
1808 		SCTP_STAT_INCR(sctps_timoshutdown);
1809 		stcb->asoc.timoshutdown++;
1810 #ifdef SCTP_AUDITING_ENABLED
1811 		sctp_auditing(4, inp, stcb, net);
1812 #endif
1813 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1814 		break;
1815 	case SCTP_TIMER_TYPE_HEARTBEAT:
1816 		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1817 			break;
1818 		}
1819 		SCTP_STAT_INCR(sctps_timoheartbeat);
1820 		stcb->asoc.timoheartbeat++;
1821 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1822 			/* no need to unlock on tcb its gone */
1823 			goto out_decr;
1824 		}
1825 #ifdef SCTP_AUDITING_ENABLED
1826 		sctp_auditing(4, inp, stcb, net);
1827 #endif
1828 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1829 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1830 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1831 		}
1832 		break;
1833 	case SCTP_TIMER_TYPE_COOKIE:
1834 		if ((stcb == NULL) || (inp == NULL)) {
1835 			break;
1836 		}
1837 
1838 		if (sctp_cookie_timer(inp, stcb, net)) {
1839 			/* no need to unlock on tcb its gone */
1840 			goto out_decr;
1841 		}
1842 		SCTP_STAT_INCR(sctps_timocookie);
1843 		stcb->asoc.timocookie++;
1844 #ifdef SCTP_AUDITING_ENABLED
1845 		sctp_auditing(4, inp, stcb, net);
1846 #endif
1847 		/*
1848 		 * We consider T3 and Cookie timer pretty much the same with
1849 		 * respect to where from in chunk_output.
1850 		 */
1851 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1852 		break;
1853 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1854 		{
1855 			struct timeval tv;
1856 			int i, secret;
1857 
1858 			if (inp == NULL) {
1859 				break;
1860 			}
1861 			SCTP_STAT_INCR(sctps_timosecret);
1862 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1863 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1864 			inp->sctp_ep.last_secret_number =
1865 			    inp->sctp_ep.current_secret_number;
1866 			inp->sctp_ep.current_secret_number++;
1867 			if (inp->sctp_ep.current_secret_number >=
1868 			    SCTP_HOW_MANY_SECRETS) {
1869 				inp->sctp_ep.current_secret_number = 0;
1870 			}
1871 			secret = (int)inp->sctp_ep.current_secret_number;
1872 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1873 				inp->sctp_ep.secret_key[secret][i] =
1874 				    sctp_select_initial_TSN(&inp->sctp_ep);
1875 			}
1876 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1877 		}
1878 		did_output = 0;
1879 		break;
1880 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1881 		if ((stcb == NULL) || (inp == NULL)) {
1882 			break;
1883 		}
1884 		SCTP_STAT_INCR(sctps_timopathmtu);
1885 		sctp_pathmtu_timer(inp, stcb, net);
1886 		did_output = 0;
1887 		break;
1888 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1889 		if ((stcb == NULL) || (inp == NULL)) {
1890 			break;
1891 		}
1892 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1893 			/* no need to unlock on tcb its gone */
1894 			goto out_decr;
1895 		}
1896 		SCTP_STAT_INCR(sctps_timoshutdownack);
1897 		stcb->asoc.timoshutdownack++;
1898 #ifdef SCTP_AUDITING_ENABLED
1899 		sctp_auditing(4, inp, stcb, net);
1900 #endif
1901 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1902 		break;
1903 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1904 		if ((stcb == NULL) || (inp == NULL)) {
1905 			break;
1906 		}
1907 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1908 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1909 		    "Shutdown guard timer expired");
1910 		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
1911 		/* no need to unlock on tcb its gone */
1912 		goto out_decr;
1913 
1914 	case SCTP_TIMER_TYPE_STRRESET:
1915 		if ((stcb == NULL) || (inp == NULL)) {
1916 			break;
1917 		}
1918 		if (sctp_strreset_timer(inp, stcb, net)) {
1919 			/* no need to unlock on tcb its gone */
1920 			goto out_decr;
1921 		}
1922 		SCTP_STAT_INCR(sctps_timostrmrst);
1923 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1924 		break;
1925 	case SCTP_TIMER_TYPE_ASCONF:
1926 		if ((stcb == NULL) || (inp == NULL)) {
1927 			break;
1928 		}
1929 		if (sctp_asconf_timer(inp, stcb, net)) {
1930 			/* no need to unlock on tcb its gone */
1931 			goto out_decr;
1932 		}
1933 		SCTP_STAT_INCR(sctps_timoasconf);
1934 #ifdef SCTP_AUDITING_ENABLED
1935 		sctp_auditing(4, inp, stcb, net);
1936 #endif
1937 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1938 		break;
1939 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1940 		if ((stcb == NULL) || (inp == NULL)) {
1941 			break;
1942 		}
1943 		sctp_delete_prim_timer(inp, stcb, net);
1944 		SCTP_STAT_INCR(sctps_timodelprim);
1945 		break;
1946 
1947 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1948 		if ((stcb == NULL) || (inp == NULL)) {
1949 			break;
1950 		}
1951 		SCTP_STAT_INCR(sctps_timoautoclose);
1952 		sctp_autoclose_timer(inp, stcb, net);
1953 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1954 		did_output = 0;
1955 		break;
1956 	case SCTP_TIMER_TYPE_ASOCKILL:
1957 		if ((stcb == NULL) || (inp == NULL)) {
1958 			break;
1959 		}
1960 		SCTP_STAT_INCR(sctps_timoassockill);
1961 		/* Can we free it yet? */
1962 		SCTP_INP_DECR_REF(inp);
1963 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
1964 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1965 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1966 		so = SCTP_INP_SO(inp);
1967 		atomic_add_int(&stcb->asoc.refcnt, 1);
1968 		SCTP_TCB_UNLOCK(stcb);
1969 		SCTP_SOCKET_LOCK(so, 1);
1970 		SCTP_TCB_LOCK(stcb);
1971 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1972 #endif
1973 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1974 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1975 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1976 		SCTP_SOCKET_UNLOCK(so, 1);
1977 #endif
1978 		/*
1979 		 * free asoc, always unlocks (or destroy's) so prevent
1980 		 * duplicate unlock or unlock of a free mtx :-0
1981 		 */
1982 		stcb = NULL;
1983 		goto out_no_decr;
1984 	case SCTP_TIMER_TYPE_INPKILL:
1985 		SCTP_STAT_INCR(sctps_timoinpkill);
1986 		if (inp == NULL) {
1987 			break;
1988 		}
1989 		/*
1990 		 * special case, take away our increment since WE are the
1991 		 * killer
1992 		 */
1993 		SCTP_INP_DECR_REF(inp);
1994 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
1995 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1996 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1997 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1998 		inp = NULL;
1999 		goto out_no_decr;
2000 	default:
2001 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
2002 		    type);
2003 		break;
2004 	}
2005 #ifdef SCTP_AUDITING_ENABLED
2006 	sctp_audit_log(0xF1, (uint8_t)type);
2007 	if (inp)
2008 		sctp_auditing(5, inp, stcb, net);
2009 #endif
2010 	if ((did_output) && stcb) {
2011 		/*
2012 		 * Now we need to clean up the control chunk chain if an
2013 		 * ECNE is on it. It must be marked as UNSENT again so next
2014 		 * call will continue to send it until such time that we get
2015 		 * a CWR, to remove it. It is, however, less likely that we
2016 		 * will find a ecn echo on the chain though.
2017 		 */
2018 		sctp_fix_ecn_echo(&stcb->asoc);
2019 	}
2020 get_out:
2021 	if (stcb) {
2022 		SCTP_TCB_UNLOCK(stcb);
2023 	} else if (inp != NULL) {
2024 		SCTP_INP_WUNLOCK(inp);
2025 	} else {
2026 		SCTP_WQ_ADDR_UNLOCK();
2027 	}
2028 
2029 out_decr:
2030 	if (inp) {
2031 		SCTP_INP_DECR_REF(inp);
2032 	}
2033 
2034 out_no_decr:
2035 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
2036 	CURVNET_RESTORE();
2037 }
2038 
2039 void
2040 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2041     struct sctp_nets *net)
2042 {
2043 	uint32_t to_ticks;
2044 	struct sctp_timer *tmr;
2045 
2046 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
2047 		return;
2048 
2049 	tmr = NULL;
2050 	if (stcb) {
2051 		SCTP_TCB_LOCK_ASSERT(stcb);
2052 	}
2053 	switch (t_type) {
2054 	case SCTP_TIMER_TYPE_ADDR_WQ:
2055 		/* Only 1 tick away :-) */
2056 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2057 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
2058 		break;
2059 	case SCTP_TIMER_TYPE_SEND:
2060 		/* Here we use the RTO timer */
2061 		{
2062 			int rto_val;
2063 
2064 			if ((stcb == NULL) || (net == NULL)) {
2065 				return;
2066 			}
2067 			tmr = &net->rxt_timer;
2068 			if (net->RTO == 0) {
2069 				rto_val = stcb->asoc.initial_rto;
2070 			} else {
2071 				rto_val = net->RTO;
2072 			}
2073 			to_ticks = MSEC_TO_TICKS(rto_val);
2074 		}
2075 		break;
2076 	case SCTP_TIMER_TYPE_INIT:
2077 		/*
2078 		 * Here we use the INIT timer default usually about 1
2079 		 * minute.
2080 		 */
2081 		if ((stcb == NULL) || (net == NULL)) {
2082 			return;
2083 		}
2084 		tmr = &net->rxt_timer;
2085 		if (net->RTO == 0) {
2086 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2087 		} else {
2088 			to_ticks = MSEC_TO_TICKS(net->RTO);
2089 		}
2090 		break;
2091 	case SCTP_TIMER_TYPE_RECV:
2092 		/*
2093 		 * Here we use the Delayed-Ack timer value from the inp
2094 		 * ususually about 200ms.
2095 		 */
2096 		if (stcb == NULL) {
2097 			return;
2098 		}
2099 		tmr = &stcb->asoc.dack_timer;
2100 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2101 		break;
2102 	case SCTP_TIMER_TYPE_SHUTDOWN:
2103 		/* Here we use the RTO of the destination. */
2104 		if ((stcb == NULL) || (net == NULL)) {
2105 			return;
2106 		}
2107 		if (net->RTO == 0) {
2108 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2109 		} else {
2110 			to_ticks = MSEC_TO_TICKS(net->RTO);
2111 		}
2112 		tmr = &net->rxt_timer;
2113 		break;
2114 	case SCTP_TIMER_TYPE_HEARTBEAT:
2115 		/*
2116 		 * the net is used here so that we can add in the RTO. Even
2117 		 * though we use a different timer. We also add the HB timer
2118 		 * PLUS a random jitter.
2119 		 */
2120 		if ((stcb == NULL) || (net == NULL)) {
2121 			return;
2122 		} else {
2123 			uint32_t rndval;
2124 			uint32_t jitter;
2125 
2126 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2127 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2128 				return;
2129 			}
2130 			if (net->RTO == 0) {
2131 				to_ticks = stcb->asoc.initial_rto;
2132 			} else {
2133 				to_ticks = net->RTO;
2134 			}
2135 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2136 			jitter = rndval % to_ticks;
2137 			if (jitter >= (to_ticks >> 1)) {
2138 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2139 			} else {
2140 				to_ticks = to_ticks - jitter;
2141 			}
2142 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2143 			    !(net->dest_state & SCTP_ADDR_PF)) {
2144 				to_ticks += net->heart_beat_delay;
2145 			}
2146 			/*
2147 			 * Now we must convert the to_ticks that are now in
2148 			 * ms to ticks.
2149 			 */
2150 			to_ticks = MSEC_TO_TICKS(to_ticks);
2151 			tmr = &net->hb_timer;
2152 		}
2153 		break;
2154 	case SCTP_TIMER_TYPE_COOKIE:
2155 		/*
2156 		 * Here we can use the RTO timer from the network since one
2157 		 * RTT was compelete. If a retran happened then we will be
2158 		 * using the RTO initial value.
2159 		 */
2160 		if ((stcb == NULL) || (net == NULL)) {
2161 			return;
2162 		}
2163 		if (net->RTO == 0) {
2164 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2165 		} else {
2166 			to_ticks = MSEC_TO_TICKS(net->RTO);
2167 		}
2168 		tmr = &net->rxt_timer;
2169 		break;
2170 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2171 		/*
2172 		 * nothing needed but the endpoint here ususually about 60
2173 		 * minutes.
2174 		 */
2175 		tmr = &inp->sctp_ep.signature_change;
2176 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2177 		break;
2178 	case SCTP_TIMER_TYPE_ASOCKILL:
2179 		if (stcb == NULL) {
2180 			return;
2181 		}
2182 		tmr = &stcb->asoc.strreset_timer;
2183 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2184 		break;
2185 	case SCTP_TIMER_TYPE_INPKILL:
2186 		/*
2187 		 * The inp is setup to die. We re-use the signature_chage
2188 		 * timer since that has stopped and we are in the GONE
2189 		 * state.
2190 		 */
2191 		tmr = &inp->sctp_ep.signature_change;
2192 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2193 		break;
2194 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2195 		/*
2196 		 * Here we use the value found in the EP for PMTU ususually
2197 		 * about 10 minutes.
2198 		 */
2199 		if ((stcb == NULL) || (net == NULL)) {
2200 			return;
2201 		}
2202 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2203 			return;
2204 		}
2205 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2206 		tmr = &net->pmtu_timer;
2207 		break;
2208 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2209 		/* Here we use the RTO of the destination */
2210 		if ((stcb == NULL) || (net == NULL)) {
2211 			return;
2212 		}
2213 		if (net->RTO == 0) {
2214 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2215 		} else {
2216 			to_ticks = MSEC_TO_TICKS(net->RTO);
2217 		}
2218 		tmr = &net->rxt_timer;
2219 		break;
2220 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2221 		/*
2222 		 * Here we use the endpoints shutdown guard timer usually
2223 		 * about 3 minutes.
2224 		 */
2225 		if (stcb == NULL) {
2226 			return;
2227 		}
2228 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2229 			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2230 		} else {
2231 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2232 		}
2233 		tmr = &stcb->asoc.shut_guard_timer;
2234 		break;
2235 	case SCTP_TIMER_TYPE_STRRESET:
2236 		/*
2237 		 * Here the timer comes from the stcb but its value is from
2238 		 * the net's RTO.
2239 		 */
2240 		if ((stcb == NULL) || (net == NULL)) {
2241 			return;
2242 		}
2243 		if (net->RTO == 0) {
2244 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2245 		} else {
2246 			to_ticks = MSEC_TO_TICKS(net->RTO);
2247 		}
2248 		tmr = &stcb->asoc.strreset_timer;
2249 		break;
2250 	case SCTP_TIMER_TYPE_ASCONF:
2251 		/*
2252 		 * Here the timer comes from the stcb but its value is from
2253 		 * the net's RTO.
2254 		 */
2255 		if ((stcb == NULL) || (net == NULL)) {
2256 			return;
2257 		}
2258 		if (net->RTO == 0) {
2259 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2260 		} else {
2261 			to_ticks = MSEC_TO_TICKS(net->RTO);
2262 		}
2263 		tmr = &stcb->asoc.asconf_timer;
2264 		break;
2265 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2266 		if ((stcb == NULL) || (net != NULL)) {
2267 			return;
2268 		}
2269 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2270 		tmr = &stcb->asoc.delete_prim_timer;
2271 		break;
2272 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2273 		if (stcb == NULL) {
2274 			return;
2275 		}
2276 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2277 			/*
2278 			 * Really an error since stcb is NOT set to
2279 			 * autoclose
2280 			 */
2281 			return;
2282 		}
2283 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2284 		tmr = &stcb->asoc.autoclose_timer;
2285 		break;
2286 	default:
2287 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2288 		    __func__, t_type);
2289 		return;
2290 		break;
2291 	}
2292 	if ((to_ticks <= 0) || (tmr == NULL)) {
2293 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2294 		    __func__, t_type, to_ticks, (void *)tmr);
2295 		return;
2296 	}
2297 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2298 		/*
2299 		 * we do NOT allow you to have it already running. if it is
2300 		 * we leave the current one up unchanged
2301 		 */
2302 		return;
2303 	}
2304 	/* At this point we can proceed */
2305 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2306 		stcb->asoc.num_send_timers_up++;
2307 	}
2308 	tmr->stopped_from = 0;
2309 	tmr->type = t_type;
2310 	tmr->ep = (void *)inp;
2311 	tmr->tcb = (void *)stcb;
2312 	tmr->net = (void *)net;
2313 	tmr->self = (void *)tmr;
2314 	tmr->vnet = (void *)curvnet;
2315 	tmr->ticks = sctp_get_tick_count();
2316 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2317 	return;
2318 }
2319 
/*
 * Stop (cancel) the timer of the given type for this endpoint/
 * association/destination.  Requests missing a required inp/stcb/net
 * argument are silently ignored, as are requests for a timer slot that
 * is currently in use by a different timer type.  'from' records the
 * caller's location for debugging.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Only the address workqueue timer runs without an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Map the timer type to the sctp_timer structure that backs it. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __func__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		/* Keep the outstanding-send-timer count consistent. */
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	/* Clearing 'self' lets a racing handler see the timer was stopped. */
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2470 
2471 uint32_t
2472 sctp_calculate_len(struct mbuf *m)
2473 {
2474 	uint32_t tlen = 0;
2475 	struct mbuf *at;
2476 
2477 	at = m;
2478 	while (at) {
2479 		tlen += SCTP_BUF_LEN(at);
2480 		at = SCTP_BUF_NEXT(at);
2481 	}
2482 	return (tlen);
2483 }
2484 
2485 void
2486 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2487     struct sctp_association *asoc, uint32_t mtu)
2488 {
2489 	/*
2490 	 * Reset the P-MTU size on this association, this involves changing
2491 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2492 	 * allow the DF flag to be cleared.
2493 	 */
2494 	struct sctp_tmit_chunk *chk;
2495 	unsigned int eff_mtu, ovh;
2496 
2497 	asoc->smallest_mtu = mtu;
2498 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2499 		ovh = SCTP_MIN_OVERHEAD;
2500 	} else {
2501 		ovh = SCTP_MIN_V4_OVERHEAD;
2502 	}
2503 	eff_mtu = mtu - ovh;
2504 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2505 		if (chk->send_size > eff_mtu) {
2506 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2507 		}
2508 	}
2509 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2510 		if (chk->send_size > eff_mtu) {
2511 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2512 		}
2513 	}
2514 }
2515 
2516 
2517 /*
2518  * Given an association and starting time of the current RTT period, update
2519  * RTO in number of msecs. net should point to the current network.
2520  * Return 1, if an RTO update was performed, return 0 if no update was
2521  * performed due to invalid starting point.
2522  */
2523 
2524 int
2525 sctp_calculate_rto(struct sctp_tcb *stcb,
2526     struct sctp_association *asoc,
2527     struct sctp_nets *net,
2528     struct timeval *old,
2529     int rtt_from_sack)
2530 {
2531 	struct timeval now;
2532 	uint64_t rtt_us;	/* RTT in us */
2533 	int32_t rtt;		/* RTT in ms */
2534 	uint32_t new_rto;
2535 	int first_measure = 0;
2536 
2537 	/************************/
2538 	/* 1. calculate new RTT */
2539 	/************************/
2540 	/* get the current time */
2541 	if (stcb->asoc.use_precise_time) {
2542 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2543 	} else {
2544 		(void)SCTP_GETTIME_TIMEVAL(&now);
2545 	}
2546 	if ((old->tv_sec > now.tv_sec) ||
2547 	    ((old->tv_sec == now.tv_sec) && (old->tv_sec > now.tv_sec))) {
2548 		/* The starting point is in the future. */
2549 		return (0);
2550 	}
2551 	timevalsub(&now, old);
2552 	rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec;
2553 	if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) {
2554 		/* The RTT is larger than a sane value. */
2555 		return (0);
2556 	}
2557 	/* store the current RTT in us */
2558 	net->rtt = rtt_us;
2559 	/* compute rtt in ms */
2560 	rtt = (int32_t)(net->rtt / 1000);
2561 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2562 		/*
2563 		 * Tell the CC module that a new update has just occurred
2564 		 * from a sack
2565 		 */
2566 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2567 	}
2568 	/*
2569 	 * Do we need to determine the lan? We do this only on sacks i.e.
2570 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2571 	 */
2572 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2573 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2574 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2575 			net->lan_type = SCTP_LAN_INTERNET;
2576 		} else {
2577 			net->lan_type = SCTP_LAN_LOCAL;
2578 		}
2579 	}
2580 
2581 	/***************************/
2582 	/* 2. update RTTVAR & SRTT */
2583 	/***************************/
2584 	/*-
2585 	 * Compute the scaled average lastsa and the
2586 	 * scaled variance lastsv as described in van Jacobson
2587 	 * Paper "Congestion Avoidance and Control", Annex A.
2588 	 *
2589 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2590 	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
2591 	 */
2592 	if (net->RTO_measured) {
2593 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2594 		net->lastsa += rtt;
2595 		if (rtt < 0) {
2596 			rtt = -rtt;
2597 		}
2598 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2599 		net->lastsv += rtt;
2600 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2601 			rto_logging(net, SCTP_LOG_RTTVAR);
2602 		}
2603 	} else {
2604 		/* First RTO measurment */
2605 		net->RTO_measured = 1;
2606 		first_measure = 1;
2607 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2608 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2609 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2610 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2611 		}
2612 	}
2613 	if (net->lastsv == 0) {
2614 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2615 	}
2616 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2617 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2618 	    (stcb->asoc.sat_network_lockout == 0)) {
2619 		stcb->asoc.sat_network = 1;
2620 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2621 		stcb->asoc.sat_network = 0;
2622 		stcb->asoc.sat_network_lockout = 1;
2623 	}
2624 	/* bound it, per C6/C7 in Section 5.3.1 */
2625 	if (new_rto < stcb->asoc.minrto) {
2626 		new_rto = stcb->asoc.minrto;
2627 	}
2628 	if (new_rto > stcb->asoc.maxrto) {
2629 		new_rto = stcb->asoc.maxrto;
2630 	}
2631 	net->RTO = new_rto;
2632 	return (1);
2633 }
2634 
2635 /*
2636  * return a pointer to a contiguous piece of data from the given mbuf chain
2637  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2638  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2639  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2640  */
2641 caddr_t
2642 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
2643 {
2644 	uint32_t count;
2645 	uint8_t *ptr;
2646 
2647 	ptr = in_ptr;
2648 	if ((off < 0) || (len <= 0))
2649 		return (NULL);
2650 
2651 	/* find the desired start location */
2652 	while ((m != NULL) && (off > 0)) {
2653 		if (off < SCTP_BUF_LEN(m))
2654 			break;
2655 		off -= SCTP_BUF_LEN(m);
2656 		m = SCTP_BUF_NEXT(m);
2657 	}
2658 	if (m == NULL)
2659 		return (NULL);
2660 
2661 	/* is the current mbuf large enough (eg. contiguous)? */
2662 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2663 		return (mtod(m, caddr_t)+off);
2664 	} else {
2665 		/* else, it spans more than one mbuf, so save a temp copy... */
2666 		while ((m != NULL) && (len > 0)) {
2667 			count = min(SCTP_BUF_LEN(m) - off, len);
2668 			memcpy(ptr, mtod(m, caddr_t)+off, count);
2669 			len -= count;
2670 			ptr += count;
2671 			off = 0;
2672 			m = SCTP_BUF_NEXT(m);
2673 		}
2674 		if ((m == NULL) && (len > 0))
2675 			return (NULL);
2676 		else
2677 			return ((caddr_t)in_ptr);
2678 	}
2679 }
2680 
2681 
2682 
2683 struct sctp_paramhdr *
2684 sctp_get_next_param(struct mbuf *m,
2685     int offset,
2686     struct sctp_paramhdr *pull,
2687     int pull_limit)
2688 {
2689 	/* This just provides a typed signature to Peter's Pull routine */
2690 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2691 	    (uint8_t *)pull));
2692 }
2693 
2694 
/*
 * Append 'padlen' (at most 3) zero bytes of padding to mbuf 'm'.
 * Uses the trailing space of 'm' when available, otherwise allocates
 * and links a fresh mbuf to hold the padding.  Returns the mbuf that
 * received the pad bytes, or NULL on failure (padlen too large or
 * allocation failure).
 */
struct mbuf *
sctp_add_pad_tombuf(struct mbuf *m, int padlen)
{
	struct mbuf *m_last;
	caddr_t dp;

	if (padlen > 3) {
		return (NULL);
	}
	if (padlen <= M_TRAILINGSPACE(m)) {
		/*
		 * The easy way. We hope the majority of the time we hit
		 * here :)
		 */
		m_last = m;
	} else {
		/* Hard way we must grow the mbuf chain */
		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
		if (m_last == NULL) {
			return (NULL);
		}
		SCTP_BUF_LEN(m_last) = 0;
		SCTP_BUF_NEXT(m_last) = NULL;
		SCTP_BUF_NEXT(m) = m_last;
	}
	/* Zero the pad bytes just past the current end of data. */
	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
	SCTP_BUF_LEN(m_last) += padlen;
	memset(dp, 0, padlen);
	return (m_last);
}
2725 
2726 struct mbuf *
2727 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2728 {
2729 	/* find the last mbuf in chain and pad it */
2730 	struct mbuf *m_at;
2731 
2732 	if (last_mbuf != NULL) {
2733 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2734 	} else {
2735 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2736 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2737 				return (sctp_add_pad_tombuf(m_at, padval));
2738 			}
2739 		}
2740 	}
2741 	return (NULL);
2742 }
2743 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification (if the application
 * enabled SCTP_PCB_FLAGS_RECVASSOCEVNT) to the socket's receive queue.
 * For COMM_UP/RESTART the notification carries the supported-features
 * list; for COMM_LOST/CANT_STR_ASSOC it carries the ABORT chunk (if
 * any).  For 1-to-1 style sockets that lost the association, also set
 * so_error appropriately and wake any sleepers.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t abort_len;
	unsigned int i;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	if (stcb == NULL) {
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
			/*
			 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
			 * contiguous.
			 */
			if (abort_len > SCTP_CHUNK_BUFFER_SIZE) {
				abort_len = SCTP_CHUNK_BUFFER_SIZE;
			}
		} else {
			abort_len = 0;
		}
		/* Size the notification for its optional trailing payload. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				/* Append one byte per supported feature. */
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				if (stcb->asoc.idata_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				/* Append the (truncated) ABORT chunk. */
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			control->spec_flags = M_NOTIFICATION;
			/* not that we need this */
			control->tail_mbuf = m_notify;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up an error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
		SOCK_UNLOCK(stcb->sctp_socket);
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		/* Hold a ref across the TCB unlock/lock to take the socket lock. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
2904 
/*
 * Deliver an SCTP_PEER_ADDR_CHANGE notification (if the application
 * enabled SCTP_PCB_FLAGS_RECVPADDREVNT) telling it that the peer
 * address 'sa' changed to 'state' (reachable, unreachable, removed,
 * ...), with 'error' giving the associated error cause if any.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		/* Report a v4-mapped v6 address if the socket asked for it. */
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2996 
2997 
/*
 * Build and queue an SCTP_SEND_FAILED (old API) or SCTP_SEND_FAILED_EVENT
 * (new API) notification for a DATA chunk that is being abandoned.  'sent'
 * selects the SCTP_DATA_SENT vs. SCTP_DATA_UNSENT flag.  The chunk's data
 * mbuf chain is stolen (chk->data is set to NULL) and appended to the
 * notification so the application gets the failed payload back.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	struct sctp_chunkhdr *chkhdr;
	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}

	/* The extended (new API) event uses a larger notification header. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	/* I-DATA chunks carry a bigger chunk header than plain DATA chunks. */
	if (stcb->asoc.idata_supported) {
		chkhdr_len = sizeof(struct sctp_idata_chunk);
	} else {
		chkhdr_len = sizeof(struct sctp_data_chunk);
	}
	/* Use some defaults in case we can't access the chunk header */
	if (chk->send_size >= chkhdr_len) {
		payload_len = chk->send_size - chkhdr_len;
	} else {
		payload_len = 0;
	}
	padding_len = 0;
	if (chk->data != NULL) {
		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
		if (chkhdr != NULL) {
			chk_len = ntohs(chkhdr->chunk_length);
			/*
			 * chunk_length excludes trailing padding, so the gap
			 * between send_size and chunk_length (when sane,
			 * i.e. less than 4 bytes) is the padding; use it to
			 * refine payload_len.
			 */
			if ((chk_len >= chkhdr_len) &&
			    (chk->send_size >= chk_len) &&
			    (chk->send_size - chk_len < 4)) {
				padding_len = chk->send_size - chk_len;
				payload_len = chk->send_size - chkhdr_len - padding_len;
			}
		}
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	if (chk->data != NULL) {
		/* Trim off the sctp chunk header (it should be there) */
		/* Only trim when the sizes computed above account for every byte. */
		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
			m_adj(chk->data, chkhdr_len);
			m_adj(chk->data, -padding_len);
			sctp_mbuf_crush(chk->data);
			chk->send_size -= (chkhdr_len + padding_len);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3132 
3133 
3134 static void
3135 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3136     struct sctp_stream_queue_pending *sp, int so_locked
3137 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3138     SCTP_UNUSED
3139 #endif
3140 )
3141 {
3142 	struct mbuf *m_notify;
3143 	struct sctp_send_failed *ssf;
3144 	struct sctp_send_failed_event *ssfe;
3145 	struct sctp_queued_to_read *control;
3146 	int notifhdr_len;
3147 
3148 	if ((stcb == NULL) ||
3149 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3150 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3151 		/* event not enabled */
3152 		return;
3153 	}
3154 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3155 		notifhdr_len = sizeof(struct sctp_send_failed_event);
3156 	} else {
3157 		notifhdr_len = sizeof(struct sctp_send_failed);
3158 	}
3159 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3160 	if (m_notify == NULL) {
3161 		/* no space left */
3162 		return;
3163 	}
3164 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3165 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3166 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3167 		memset(ssfe, 0, notifhdr_len);
3168 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3169 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3170 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
3171 		ssfe->ssfe_error = error;
3172 		/* not exactly what the user sent in, but should be close :) */
3173 		ssfe->ssfe_info.snd_sid = sp->sid;
3174 		if (sp->some_taken) {
3175 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3176 		} else {
3177 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3178 		}
3179 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3180 		ssfe->ssfe_info.snd_context = sp->context;
3181 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3182 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3183 	} else {
3184 		ssf = mtod(m_notify, struct sctp_send_failed *);
3185 		memset(ssf, 0, notifhdr_len);
3186 		ssf->ssf_type = SCTP_SEND_FAILED;
3187 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3188 		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
3189 		ssf->ssf_error = error;
3190 		/* not exactly what the user sent in, but should be close :) */
3191 		ssf->ssf_info.sinfo_stream = sp->sid;
3192 		ssf->ssf_info.sinfo_ssn = 0;
3193 		if (sp->some_taken) {
3194 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3195 		} else {
3196 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3197 		}
3198 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3199 		ssf->ssf_info.sinfo_context = sp->context;
3200 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3201 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3202 	}
3203 	SCTP_BUF_NEXT(m_notify) = sp->data;
3204 
3205 	/* Steal off the mbuf */
3206 	sp->data = NULL;
3207 	/*
3208 	 * For this case, we check the actual socket buffer, since the assoc
3209 	 * is going away we don't want to overfill the socket buffer for a
3210 	 * non-reader
3211 	 */
3212 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3213 		sctp_m_freem(m_notify);
3214 		return;
3215 	}
3216 	/* append to socket */
3217 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3218 	    0, 0, stcb->asoc.context, 0, 0, 0,
3219 	    m_notify);
3220 	if (control == NULL) {
3221 		/* no memory */
3222 		sctp_m_freem(m_notify);
3223 		return;
3224 	}
3225 	control->length = SCTP_BUF_LEN(m_notify);
3226 	control->spec_flags = M_NOTIFICATION;
3227 	/* not that we need this */
3228 	control->tail_mbuf = m_notify;
3229 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3230 	    control,
3231 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3232 }
3233 
3234 
3235 
3236 static void
3237 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3238 {
3239 	struct mbuf *m_notify;
3240 	struct sctp_adaptation_event *sai;
3241 	struct sctp_queued_to_read *control;
3242 
3243 	if ((stcb == NULL) ||
3244 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3245 		/* event not enabled */
3246 		return;
3247 	}
3248 
3249 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3250 	if (m_notify == NULL)
3251 		/* no space left */
3252 		return;
3253 	SCTP_BUF_LEN(m_notify) = 0;
3254 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3255 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3256 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3257 	sai->sai_flags = 0;
3258 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3259 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3260 	sai->sai_assoc_id = sctp_get_associd(stcb);
3261 
3262 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3263 	SCTP_BUF_NEXT(m_notify) = NULL;
3264 
3265 	/* append to socket */
3266 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3267 	    0, 0, stcb->asoc.context, 0, 0, 0,
3268 	    m_notify);
3269 	if (control == NULL) {
3270 		/* no memory */
3271 		sctp_m_freem(m_notify);
3272 		return;
3273 	}
3274 	control->length = SCTP_BUF_LEN(m_notify);
3275 	control->spec_flags = M_NOTIFICATION;
3276 	/* not that we need this */
3277 	control->tail_mbuf = m_notify;
3278 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3279 	    control,
3280 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3281 }
3282 
/* This always must be called with the read-queue LOCKED in the INP */
/*
 * Queue an SCTP_PARTIAL_DELIVERY_EVENT notification.  'val' packs the
 * stream number in the upper 16 bits and the sequence number in the lower
 * 16 bits.  Unlike the other notification routines, this one inserts the
 * entry directly into the inp read queue (after the in-progress
 * partial-delivery entry, if any) and wakes the reader itself.
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	/* No point in queueing if the application can no longer read. */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		return;
	}

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* stream and sequence number are packed into 'val' by the caller */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	/* Account for the notification in the receive socket buffer. */
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	control->end_added = 1;
	/*
	 * Place the notification right after the partial-delivery entry it
	 * refers to, so the reader sees it in order.
	 */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * NOTE(review): lock-order dance — drop the TCB lock while
		 * acquiring the socket lock, holding a refcount so the stcb
		 * cannot disappear; give up if the socket went away.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3376 
/*
 * Notify the application that a SHUTDOWN has completed: for one-to-one
 * style (and connected one-to-many) sockets, mark the socket as unable to
 * send; then, if subscribed, queue an SCTP_SHUTDOWN_EVENT notification.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * NOTE(review): lock-order dance — drop the TCB lock while
		 * acquiring the socket lock, holding a refcount so the stcb
		 * cannot disappear; give up if the socket was closed.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3446 
3447 static void
3448 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3449     int so_locked
3450 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3451     SCTP_UNUSED
3452 #endif
3453 )
3454 {
3455 	struct mbuf *m_notify;
3456 	struct sctp_sender_dry_event *event;
3457 	struct sctp_queued_to_read *control;
3458 
3459 	if ((stcb == NULL) ||
3460 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3461 		/* event not enabled */
3462 		return;
3463 	}
3464 
3465 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3466 	if (m_notify == NULL) {
3467 		/* no space left */
3468 		return;
3469 	}
3470 	SCTP_BUF_LEN(m_notify) = 0;
3471 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3472 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3473 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3474 	event->sender_dry_flags = 0;
3475 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3476 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3477 
3478 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3479 	SCTP_BUF_NEXT(m_notify) = NULL;
3480 
3481 	/* append to socket */
3482 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3483 	    0, 0, stcb->asoc.context, 0, 0, 0,
3484 	    m_notify);
3485 	if (control == NULL) {
3486 		/* no memory */
3487 		sctp_m_freem(m_notify);
3488 		return;
3489 	}
3490 	control->length = SCTP_BUF_LEN(m_notify);
3491 	control->spec_flags = M_NOTIFICATION;
3492 	/* not that we need this */
3493 	control->tail_mbuf = m_notify;
3494 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3495 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3496 }
3497 
3498 
3499 void
3500 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3501 {
3502 	struct mbuf *m_notify;
3503 	struct sctp_queued_to_read *control;
3504 	struct sctp_stream_change_event *stradd;
3505 
3506 	if ((stcb == NULL) ||
3507 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3508 		/* event not enabled */
3509 		return;
3510 	}
3511 	if ((stcb->asoc.peer_req_out) && flag) {
3512 		/* Peer made the request, don't tell the local user */
3513 		stcb->asoc.peer_req_out = 0;
3514 		return;
3515 	}
3516 	stcb->asoc.peer_req_out = 0;
3517 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3518 	if (m_notify == NULL)
3519 		/* no space left */
3520 		return;
3521 	SCTP_BUF_LEN(m_notify) = 0;
3522 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3523 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3524 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3525 	stradd->strchange_flags = flag;
3526 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3527 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3528 	stradd->strchange_instrms = numberin;
3529 	stradd->strchange_outstrms = numberout;
3530 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3531 	SCTP_BUF_NEXT(m_notify) = NULL;
3532 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3533 		/* no space */
3534 		sctp_m_freem(m_notify);
3535 		return;
3536 	}
3537 	/* append to socket */
3538 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3539 	    0, 0, stcb->asoc.context, 0, 0, 0,
3540 	    m_notify);
3541 	if (control == NULL) {
3542 		/* no memory */
3543 		sctp_m_freem(m_notify);
3544 		return;
3545 	}
3546 	control->length = SCTP_BUF_LEN(m_notify);
3547 	control->spec_flags = M_NOTIFICATION;
3548 	/* not that we need this */
3549 	control->tail_mbuf = m_notify;
3550 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3551 	    control,
3552 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3553 }
3554 
3555 void
3556 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3557 {
3558 	struct mbuf *m_notify;
3559 	struct sctp_queued_to_read *control;
3560 	struct sctp_assoc_reset_event *strasoc;
3561 
3562 	if ((stcb == NULL) ||
3563 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3564 		/* event not enabled */
3565 		return;
3566 	}
3567 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3568 	if (m_notify == NULL)
3569 		/* no space left */
3570 		return;
3571 	SCTP_BUF_LEN(m_notify) = 0;
3572 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3573 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3574 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3575 	strasoc->assocreset_flags = flag;
3576 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3577 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3578 	strasoc->assocreset_local_tsn = sending_tsn;
3579 	strasoc->assocreset_remote_tsn = recv_tsn;
3580 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3581 	SCTP_BUF_NEXT(m_notify) = NULL;
3582 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3583 		/* no space */
3584 		sctp_m_freem(m_notify);
3585 		return;
3586 	}
3587 	/* append to socket */
3588 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3589 	    0, 0, stcb->asoc.context, 0, 0, 0,
3590 	    m_notify);
3591 	if (control == NULL) {
3592 		/* no memory */
3593 		sctp_m_freem(m_notify);
3594 		return;
3595 	}
3596 	control->length = SCTP_BUF_LEN(m_notify);
3597 	control->spec_flags = M_NOTIFICATION;
3598 	/* not that we need this */
3599 	control->tail_mbuf = m_notify;
3600 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3601 	    control,
3602 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3603 }
3604 
3605 
3606 
3607 static void
3608 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3609     int number_entries, uint16_t *list, int flag)
3610 {
3611 	struct mbuf *m_notify;
3612 	struct sctp_queued_to_read *control;
3613 	struct sctp_stream_reset_event *strreset;
3614 	int len;
3615 
3616 	if ((stcb == NULL) ||
3617 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3618 		/* event not enabled */
3619 		return;
3620 	}
3621 
3622 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3623 	if (m_notify == NULL)
3624 		/* no space left */
3625 		return;
3626 	SCTP_BUF_LEN(m_notify) = 0;
3627 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3628 	if (len > M_TRAILINGSPACE(m_notify)) {
3629 		/* never enough room */
3630 		sctp_m_freem(m_notify);
3631 		return;
3632 	}
3633 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3634 	memset(strreset, 0, len);
3635 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3636 	strreset->strreset_flags = flag;
3637 	strreset->strreset_length = len;
3638 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3639 	if (number_entries) {
3640 		int i;
3641 
3642 		for (i = 0; i < number_entries; i++) {
3643 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3644 		}
3645 	}
3646 	SCTP_BUF_LEN(m_notify) = len;
3647 	SCTP_BUF_NEXT(m_notify) = NULL;
3648 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3649 		/* no space */
3650 		sctp_m_freem(m_notify);
3651 		return;
3652 	}
3653 	/* append to socket */
3654 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3655 	    0, 0, stcb->asoc.context, 0, 0, 0,
3656 	    m_notify);
3657 	if (control == NULL) {
3658 		/* no memory */
3659 		sctp_m_freem(m_notify);
3660 		return;
3661 	}
3662 	control->length = SCTP_BUF_LEN(m_notify);
3663 	control->spec_flags = M_NOTIFICATION;
3664 	/* not that we need this */
3665 	control->tail_mbuf = m_notify;
3666 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3667 	    control,
3668 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3669 }
3670 
3671 
3672 static void
3673 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3674 {
3675 	struct mbuf *m_notify;
3676 	struct sctp_remote_error *sre;
3677 	struct sctp_queued_to_read *control;
3678 	unsigned int notif_len;
3679 	uint16_t chunk_len;
3680 
3681 	if ((stcb == NULL) ||
3682 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3683 		return;
3684 	}
3685 	if (chunk != NULL) {
3686 		chunk_len = ntohs(chunk->ch.chunk_length);
3687 		/*
3688 		 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
3689 		 * contiguous.
3690 		 */
3691 		if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) {
3692 			chunk_len = SCTP_CHUNK_BUFFER_SIZE;
3693 		}
3694 	} else {
3695 		chunk_len = 0;
3696 	}
3697 	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
3698 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3699 	if (m_notify == NULL) {
3700 		/* Retry with smaller value. */
3701 		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
3702 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3703 		if (m_notify == NULL) {
3704 			return;
3705 		}
3706 	}
3707 	SCTP_BUF_NEXT(m_notify) = NULL;
3708 	sre = mtod(m_notify, struct sctp_remote_error *);
3709 	memset(sre, 0, notif_len);
3710 	sre->sre_type = SCTP_REMOTE_ERROR;
3711 	sre->sre_flags = 0;
3712 	sre->sre_length = sizeof(struct sctp_remote_error);
3713 	sre->sre_error = error;
3714 	sre->sre_assoc_id = sctp_get_associd(stcb);
3715 	if (notif_len > sizeof(struct sctp_remote_error)) {
3716 		memcpy(sre->sre_data, chunk, chunk_len);
3717 		sre->sre_length += chunk_len;
3718 	}
3719 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3720 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3721 	    0, 0, stcb->asoc.context, 0, 0, 0,
3722 	    m_notify);
3723 	if (control != NULL) {
3724 		control->length = SCTP_BUF_LEN(m_notify);
3725 		control->spec_flags = M_NOTIFICATION;
3726 		/* not that we need this */
3727 		control->tail_mbuf = m_notify;
3728 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3729 		    control,
3730 		    &stcb->sctp_socket->so_rcv, 1,
3731 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3732 	} else {
3733 		sctp_m_freem(m_notify);
3734 	}
3735 }
3736 
3737 
3738 void
3739 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3740     uint32_t error, void *data, int so_locked
3741 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3742     SCTP_UNUSED
3743 #endif
3744 )
3745 {
3746 	if ((stcb == NULL) ||
3747 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3748 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3749 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3750 		/* If the socket is gone we are out of here */
3751 		return;
3752 	}
3753 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3754 		return;
3755 	}
3756 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
3757 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
3758 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3759 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3760 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3761 			/* Don't report these in front states */
3762 			return;
3763 		}
3764 	}
3765 	switch (notification) {
3766 	case SCTP_NOTIFY_ASSOC_UP:
3767 		if (stcb->asoc.assoc_up_sent == 0) {
3768 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3769 			stcb->asoc.assoc_up_sent = 1;
3770 		}
3771 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3772 			sctp_notify_adaptation_layer(stcb);
3773 		}
3774 		if (stcb->asoc.auth_supported == 0) {
3775 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3776 			    NULL, so_locked);
3777 		}
3778 		break;
3779 	case SCTP_NOTIFY_ASSOC_DOWN:
3780 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3781 		break;
3782 	case SCTP_NOTIFY_INTERFACE_DOWN:
3783 		{
3784 			struct sctp_nets *net;
3785 
3786 			net = (struct sctp_nets *)data;
3787 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3788 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3789 			break;
3790 		}
3791 	case SCTP_NOTIFY_INTERFACE_UP:
3792 		{
3793 			struct sctp_nets *net;
3794 
3795 			net = (struct sctp_nets *)data;
3796 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3797 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3798 			break;
3799 		}
3800 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3801 		{
3802 			struct sctp_nets *net;
3803 
3804 			net = (struct sctp_nets *)data;
3805 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3806 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3807 			break;
3808 		}
3809 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3810 		sctp_notify_send_failed2(stcb, error,
3811 		    (struct sctp_stream_queue_pending *)data, so_locked);
3812 		break;
3813 	case SCTP_NOTIFY_SENT_DG_FAIL:
3814 		sctp_notify_send_failed(stcb, 1, error,
3815 		    (struct sctp_tmit_chunk *)data, so_locked);
3816 		break;
3817 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3818 		sctp_notify_send_failed(stcb, 0, error,
3819 		    (struct sctp_tmit_chunk *)data, so_locked);
3820 		break;
3821 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3822 		{
3823 			uint32_t val;
3824 
3825 			val = *((uint32_t *)data);
3826 
3827 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3828 			break;
3829 		}
3830 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3831 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
3832 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
3833 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3834 		} else {
3835 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3836 		}
3837 		break;
3838 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3839 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
3840 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
3841 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3842 		} else {
3843 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3844 		}
3845 		break;
3846 	case SCTP_NOTIFY_ASSOC_RESTART:
3847 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3848 		if (stcb->asoc.auth_supported == 0) {
3849 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3850 			    NULL, so_locked);
3851 		}
3852 		break;
3853 	case SCTP_NOTIFY_STR_RESET_SEND:
3854 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
3855 		break;
3856 	case SCTP_NOTIFY_STR_RESET_RECV:
3857 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
3858 		break;
3859 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3860 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3861 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3862 		break;
3863 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3864 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3865 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3866 		break;
3867 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3868 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3869 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3870 		break;
3871 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3872 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3873 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3874 		break;
3875 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3876 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3877 		    error, so_locked);
3878 		break;
3879 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3880 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3881 		    error, so_locked);
3882 		break;
3883 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3884 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3885 		    error, so_locked);
3886 		break;
3887 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3888 		sctp_notify_shutdown_event(stcb);
3889 		break;
3890 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3891 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3892 		    (uint16_t)(uintptr_t)data,
3893 		    so_locked);
3894 		break;
3895 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3896 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3897 		    (uint16_t)(uintptr_t)data,
3898 		    so_locked);
3899 		break;
3900 	case SCTP_NOTIFY_NO_PEER_AUTH:
3901 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3902 		    (uint16_t)(uintptr_t)data,
3903 		    so_locked);
3904 		break;
3905 	case SCTP_NOTIFY_SENDER_DRY:
3906 		sctp_notify_sender_dry_event(stcb, so_locked);
3907 		break;
3908 	case SCTP_NOTIFY_REMOTE_ERROR:
3909 		sctp_notify_remote_error(stcb, error, data);
3910 		break;
3911 	default:
3912 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3913 		    __func__, notification, notification);
3914 		break;
3915 	}			/* end switch */
3916 }
3917 
/*
 * Abort-time cleanup: walk every outbound queue of the association and
 * report each still-queued chunk to the ULP as a send failure, freeing
 * the chunk data as we go.  Covers the sent queue (SENT_DG_FAIL), the
 * send queue (UNSENT_DG_FAIL) and each stream's pending-message queue
 * (SPECIAL_SP_FAIL), passing "error" through as the failure cause.
 *
 * holds_lock: non-zero when the caller already owns the TCB send lock.
 * Quietly does nothing if the association or its socket is already
 * being torn down.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		/* no socket left to report to */
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			/* keep the per-stream queued-chunk count in sync */
			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* re-check: the notify path may have taken the mbuf */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* re-check: the notify path may have taken the mbuf */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				/* re-check: the notify path may have taken the mbuf */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
4029 
4030 void
4031 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
4032     struct sctp_abort_chunk *abort, int so_locked
4033 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4034     SCTP_UNUSED
4035 #endif
4036 )
4037 {
4038 	if (stcb == NULL) {
4039 		return;
4040 	}
4041 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
4042 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4043 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
4044 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
4045 	}
4046 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4047 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4048 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
4049 		return;
4050 	}
4051 	/* Tell them we lost the asoc */
4052 	sctp_report_all_outbound(stcb, error, 0, so_locked);
4053 	if (from_peer) {
4054 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
4055 	} else {
4056 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
4057 	}
4058 }
4059 
/*
 * Send an ABORT for the inbound packet described by m/iphlen/src/dst/sh
 * and, when a TCB is supplied, tear the association down as well:
 * notify the ULP, mark the state WAS_ABORTED, bump the abort statistics
 * and free the association.  With stcb == NULL only the ABORT packet is
 * sent (vtag 0, the caller's vrf_id).
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	vtag = 0;
	if (stcb != NULL) {
		/* use the peer's vtag (and the assoc's vrf) in the ABORT */
		vtag = stcb->asoc.peer_vtag;
		vrf_id = stcb->asoc.vrf_id;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    mflowtype, mflowid, inp->fibnum,
	    vrf_id, port);
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Re-acquire the locks in socket-then-TCB order, holding
		 * a refcount so the TCB cannot vanish in between.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* an established association is no longer established */
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the association's inbound and outbound TSN logs
 * (the circular buffers in_tsnlog/out_tsnlog, handling wrap-around by
 * printing the oldest half first).
 *
 * NOTE(review): the body is guarded by "NOSIY_PRINTS", which looks like
 * a typo of NOISY_PRINTS - as written, the prints can only be enabled
 * by defining the misspelled name.  Confirm intent before renaming.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* wrapped: entries from tsn_in_at to the end are the oldest */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		/* then the newer entries from the start up to the cursor */
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		/* same wrap-around handling for the outbound log */
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
#endif
4168 
/*
 * Abort an existing association: send an ABORT chunk to the peer (with
 * op_err, possibly NULL, as the cause), notify the ULP unless the
 * socket is already gone, and free the association.  With stcb == NULL
 * there is nothing to abort; instead the inp itself is freed if it is
 * pending destruction and has no remaining associations.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				/* endpoint is dying and empty: finish it off */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* an established association is no longer established */
	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		/*
		 * Re-acquire the locks in socket-then-TCB order, holding
		 * a refcount so the TCB cannot vanish in between.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4229 
/*
 * Handle an out-of-the-blue packet (one matching no existing
 * association).  The chunk list is scanned first: packets containing
 * PACKET-DROPPED, ABORT or SHUTDOWN-COMPLETE get no reply at all, a
 * SHUTDOWN-ACK is answered with SHUTDOWN-COMPLETE, and anything else
 * is answered with an ABORT - unless suppressed by the sctp_blackhole
 * sysctl (1 = stay quiet only for packets containing an INIT,
 * 2 = always stay quiet).
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    struct mbuf *cause,
    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			/* endpoint is dying and empty: finish it off */
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *)&chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			/* remembered for the blackhole decision below */
			contains_init_chunk = 1;
			break;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(src, dst, sh,
			    mflowtype, mflowid, fibnum,
			    vrf_id, port);
			return;
		default:
			break;
		}
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *)&chunk_buf);
	}
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	    (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
		    mflowtype, mflowid, fibnum,
		    vrf_id, port);
	}
}
4295 
4296 /*
4297  * check the inbound datagram to make sure there is not an abort inside it,
4298  * if there is return 1, else return 0.
4299  */
4300 int
4301 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
4302 {
4303 	struct sctp_chunkhdr *ch;
4304 	struct sctp_init_chunk *init_chk, chunk_buf;
4305 	int offset;
4306 	unsigned int chk_length;
4307 
4308 	offset = iphlen + sizeof(struct sctphdr);
4309 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4310 	    (uint8_t *)&chunk_buf);
4311 	while (ch != NULL) {
4312 		chk_length = ntohs(ch->chunk_length);
4313 		if (chk_length < sizeof(*ch)) {
4314 			/* packet is probably corrupt */
4315 			break;
4316 		}
4317 		/* we seem to be ok, is it an abort? */
4318 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4319 			/* yep, tell them */
4320 			return (1);
4321 		}
4322 		if (ch->chunk_type == SCTP_INITIATION) {
4323 			/* need to update the Vtag */
4324 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4325 			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
4326 			if (init_chk != NULL) {
4327 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4328 			}
4329 		}
4330 		/* Nope, move to the next chunk */
4331 		offset += SCTP_SIZE32(chk_length);
4332 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4333 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4334 	}
4335 	return (0);
4336 }
4337 
4338 /*
4339  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4340  * set (i.e. it's 0) so, create this function to compare link local scopes
4341  */
4342 #ifdef INET6
4343 uint32_t
4344 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4345 {
4346 	struct sockaddr_in6 a, b;
4347 
4348 	/* save copies */
4349 	a = *addr1;
4350 	b = *addr2;
4351 
4352 	if (a.sin6_scope_id == 0)
4353 		if (sa6_recoverscope(&a)) {
4354 			/* can't get scope, so can't match */
4355 			return (0);
4356 		}
4357 	if (b.sin6_scope_id == 0)
4358 		if (sa6_recoverscope(&b)) {
4359 			/* can't get scope, so can't match */
4360 			return (0);
4361 		}
4362 	if (a.sin6_scope_id != b.sin6_scope_id)
4363 		return (0);
4364 
4365 	return (1);
4366 }
4367 
4368 /*
4369  * returns a sockaddr_in6 with embedded scope recovered and removed
4370  */
4371 struct sockaddr_in6 *
4372 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4373 {
4374 	/* check and strip embedded scope junk */
4375 	if (addr->sin6_family == AF_INET6) {
4376 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4377 			if (addr->sin6_scope_id == 0) {
4378 				*store = *addr;
4379 				if (!sa6_recoverscope(store)) {
4380 					/* use the recovered scope */
4381 					addr = store;
4382 				}
4383 			} else {
4384 				/* else, return the original "to" addr */
4385 				in6_clearscope(&addr->sin6_addr);
4386 			}
4387 		}
4388 	}
4389 	return (addr);
4390 }
4391 #endif
4392 
4393 /*
4394  * are the two addresses the same?  currently a "scopeless" check returns: 1
4395  * if same, 0 if not
4396  */
4397 int
4398 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4399 {
4400 
4401 	/* must be valid */
4402 	if (sa1 == NULL || sa2 == NULL)
4403 		return (0);
4404 
4405 	/* must be the same family */
4406 	if (sa1->sa_family != sa2->sa_family)
4407 		return (0);
4408 
4409 	switch (sa1->sa_family) {
4410 #ifdef INET6
4411 	case AF_INET6:
4412 		{
4413 			/* IPv6 addresses */
4414 			struct sockaddr_in6 *sin6_1, *sin6_2;
4415 
4416 			sin6_1 = (struct sockaddr_in6 *)sa1;
4417 			sin6_2 = (struct sockaddr_in6 *)sa2;
4418 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4419 			    sin6_2));
4420 		}
4421 #endif
4422 #ifdef INET
4423 	case AF_INET:
4424 		{
4425 			/* IPv4 addresses */
4426 			struct sockaddr_in *sin_1, *sin_2;
4427 
4428 			sin_1 = (struct sockaddr_in *)sa1;
4429 			sin_2 = (struct sockaddr_in *)sa2;
4430 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4431 		}
4432 #endif
4433 	default:
4434 		/* we don't do these... */
4435 		return (0);
4436 	}
4437 }
4438 
4439 void
4440 sctp_print_address(struct sockaddr *sa)
4441 {
4442 #ifdef INET6
4443 	char ip6buf[INET6_ADDRSTRLEN];
4444 #endif
4445 
4446 	switch (sa->sa_family) {
4447 #ifdef INET6
4448 	case AF_INET6:
4449 		{
4450 			struct sockaddr_in6 *sin6;
4451 
4452 			sin6 = (struct sockaddr_in6 *)sa;
4453 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4454 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4455 			    ntohs(sin6->sin6_port),
4456 			    sin6->sin6_scope_id);
4457 			break;
4458 		}
4459 #endif
4460 #ifdef INET
4461 	case AF_INET:
4462 		{
4463 			struct sockaddr_in *sin;
4464 			unsigned char *p;
4465 
4466 			sin = (struct sockaddr_in *)sa;
4467 			p = (unsigned char *)&sin->sin_addr;
4468 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4469 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4470 			break;
4471 		}
4472 #endif
4473 	default:
4474 		SCTP_PRINTF("?\n");
4475 		break;
4476 	}
4477 }
4478 
/*
 * Migrate all queued read data belonging to "stcb" from old_inp's
 * socket to new_inp's.  The controls are first collected on a private
 * queue under the old inp's read lock, with their mbufs uncharged from
 * the old receive buffer, then re-charged and appended to the new inp's
 * read queue.  If the old socket's receive buffer cannot be sb-locked
 * (waitflags decide whether we may sleep), the data is deliberately
 * left in place - see the comment below.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* uncharge every mbuf from the old receive buffer */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* charge every mbuf to the new receive buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4554 
/*
 * Wake up any reader sleeping on the inp's socket.  On Apple/lock-
 * testing builds this may have to drop the TCB lock and take the socket
 * lock first, holding a refcount so the TCB cannot go away meanwhile.
 */
void
sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(inp);
		if (!so_locked) {
			if (stcb) {
				/* hold a ref across the lock juggling */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket vanished while we juggled locks */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
4593 
/*
 * Append "control" to the inp's read queue, charging each of its mbufs
 * to the socket buffer "sb" so select/poll see the data.  Zero-length
 * mbufs are unlinked along the way.  If the socket can no longer be
 * read from, or the chain collapses to nothing, the control is released
 * instead (unless it is still linked on a stream queue).  "end" marks
 * the message complete; inp_read_lock_held and so_locked tell us which
 * locks the caller already owns.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* reader is gone: drop the data instead of queueing it */
		if (!control->on_strm_q) {
			sctp_free_remote_addr(control->whoFrom);
			if (control->data) {
				sctp_m_freem(control->data);
				control->data = NULL;
			}
			sctp_free_a_readq(stcb, control);
		}
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* notifications don't count as received user messages */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* charge each mbuf to sb, unlinking any zero-length ones */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		if (!control->on_strm_q) {
			sctp_free_remote_addr(control->whoFrom);
			sctp_free_a_readq(stcb, control);
		}
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	control->on_read_q = 1;
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* let any blocked reader know there is data */
	if (inp && inp->sctp_socket) {
		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
	}
}
4695 
4696 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4697  *************ALTERNATE ROUTING CODE
4698  */
4699 
4700 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4701  *************ALTERNATE ROUTING CODE
4702  */
4703 
4704 struct mbuf *
4705 sctp_generate_cause(uint16_t code, char *info)
4706 {
4707 	struct mbuf *m;
4708 	struct sctp_gen_error_cause *cause;
4709 	size_t info_len;
4710 	uint16_t len;
4711 
4712 	if ((code == 0) || (info == NULL)) {
4713 		return (NULL);
4714 	}
4715 	info_len = strlen(info);
4716 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4717 		return (NULL);
4718 	}
4719 	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
4720 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4721 	if (m != NULL) {
4722 		SCTP_BUF_LEN(m) = len;
4723 		cause = mtod(m, struct sctp_gen_error_cause *);
4724 		cause->code = htons(code);
4725 		cause->length = htons(len);
4726 		memcpy(cause->info, info, info_len);
4727 	}
4728 	return (m);
4729 }
4730 
4731 struct mbuf *
4732 sctp_generate_no_user_data_cause(uint32_t tsn)
4733 {
4734 	struct mbuf *m;
4735 	struct sctp_error_no_user_data *no_user_data_cause;
4736 	uint16_t len;
4737 
4738 	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
4739 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4740 	if (m != NULL) {
4741 		SCTP_BUF_LEN(m) = len;
4742 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4743 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4744 		no_user_data_cause->cause.length = htons(len);
4745 		no_user_data_cause->tsn = htonl(tsn);
4746 	}
4747 	return (m);
4748 }
4749 
#ifdef SCTP_MBCNT_LOGGING
/*
 * Release the output-queue space accounted to tp1 (this logging variant
 * also records the decrease via sctp_log_mbcnt).  Subtracts chk_cnt
 * from the association's chunks_on_out_queue, shrinks
 * total_output_queue_size and, for 1-to-1 style sockets, the socket's
 * so_snd byte count - clamping at zero rather than underflowing.
 * No-op if tp1 carries no data.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* clamp at zero instead of letting the counter wrap */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* 1-to-1 style sockets also track the bytes in so_snd */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}

#endif
4784 
/*
 * Abandon the PR-SCTP message that chunk tp1 belongs to because its
 * partial-reliability policy limit has been reached.  If the message was
 * fragmented, all of its fragments are marked for FORWARD-TSN skipping,
 * whether they sit on the sent queue, the send queue, or (still
 * un-chunked) on the stream output queue.  "sent" is non-zero when tp1
 * was already transmitted at least once, zero when it never hit the
 * wire; "so_locked" tells the notification/wakeup path whether the
 * socket lock is already held by the caller.  Called with the TCB lock
 * held.  Returns the total book-keeping size (in bytes) released.
 */
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    uint8_t sent, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_stream_out *strq;
	struct sctp_tmit_chunk *chk = NULL, *tp2;
	struct sctp_stream_queue_pending *sp;
	uint32_t mid;
	uint16_t sid;
	uint8_t foundeom = 0;
	int ret_sz = 0;
	int notdone;
	int do_wakeup_routine = 0;

	sid = tp1->rec.data.sid;
	mid = tp1->rec.data.mid;
	/*
	 * Book the abandonment statistics.  A chunk counts as "sent" if it
	 * was transmitted, or if it is a middle/last fragment (the first
	 * fragment of the message may already have gone out).
	 */
	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
		stcb->asoc.abandoned_sent[0]++;
		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[sid].abandoned_sent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	} else {
		stcb->asoc.abandoned_unsent[0]++;
		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	}
	/*
	 * Walk forward from tp1 on its queue, releasing every fragment of
	 * the message and marking it SCTP_FORWARD_TSN_SKIP, until we hit
	 * an unfragmented chunk or the LAST fragment (foundeom).
	 */
	do {
		ret_sz += tp1->book_size;
		if (tp1->data != NULL) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				sctp_flight_size_decrease(tp1);
				sctp_total_flight_decrease(stcb, tp1);
			}
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			/* Give the space back to the peer's receive window. */
			stcb->asoc.peers_rwnd += tp1->send_size;
			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			do_wakeup_routine = 1;
			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
				stcb->asoc.sent_queue_cnt_removeable--;
			}
		}
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed, we are done */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * Its a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if (foundeom == 0) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.
		 */
		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
			if ((tp1->rec.data.sid != sid) ||
			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
				break;
			}
			/*
			 * save to chk in case we have some on stream out
			 * queue. If so and we have an un-transmitted one we
			 * don't have to fudge the TSN.
			 */
			chk = tp1;
			ret_sz += tp1->book_size;
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			/* No flight involved here book the size to 0 */
			tp1->book_size = 0;
			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				foundeom = 1;
			}
			do_wakeup_routine = 1;
			tp1->sent = SCTP_FORWARD_TSN_SKIP;
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/*
			 * on to the sent queue so we can wait for it to be
			 * passed by.
			 */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.send_queue_cnt--;
			stcb->asoc.sent_queue_cnt++;
		}
	}
	if (foundeom == 0) {
		/*
		 * Still no eom found. That means there is stuff left on the
		 * stream out queue.. yuck.
		 */
		SCTP_TCB_SEND_LOCK(stcb);
		strq = &stcb->asoc.strmout[sid];
		sp = TAILQ_FIRST(&strq->outqueue);
		if (sp != NULL) {
			sp->discard_rest = 1;
			/*
			 * We may need to put a chunk on the queue that
			 * holds the TSN that would have been sent with the
			 * LAST bit.
			 */
			if (chk == NULL) {
				/* Yep, we have to */
				sctp_alloc_a_chunk(stcb, chk);
				if (chk == NULL) {
					/*
					 * we are hosed. All we can do is
					 * nothing.. which will cause an
					 * abort if the peer is paying
					 * attention.
					 */
					goto oh_well;
				}
				memset(chk, 0, sizeof(*chk));
				chk->rec.data.rcv_flags = 0;
				chk->sent = SCTP_FORWARD_TSN_SKIP;
				chk->asoc = &stcb->asoc;
				/*
				 * Pick the MID the LAST fragment would have
				 * carried; for old DATA the unordered MID
				 * is simply 0.
				 */
				if (stcb->asoc.idata_supported == 0) {
					if (sp->sinfo_flags & SCTP_UNORDERED) {
						chk->rec.data.mid = 0;
					} else {
						chk->rec.data.mid = strq->next_mid_ordered;
					}
				} else {
					if (sp->sinfo_flags & SCTP_UNORDERED) {
						chk->rec.data.mid = strq->next_mid_unordered;
					} else {
						chk->rec.data.mid = strq->next_mid_ordered;
					}
				}
				chk->rec.data.sid = sp->sid;
				chk->rec.data.ppid = sp->ppid;
				chk->rec.data.context = sp->context;
				chk->flags = sp->act_flags;
				chk->whoTo = NULL;
				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
				strq->chunks_on_queues++;
				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
				stcb->asoc.sent_queue_cnt++;
				stcb->asoc.pr_sctp_cnt++;
			}
			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
			if (sp->sinfo_flags & SCTP_UNORDERED) {
				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
			}
			/* Consume the MID we just used for the fake LAST. */
			if (stcb->asoc.idata_supported == 0) {
				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
					strq->next_mid_ordered++;
				}
			} else {
				if (sp->sinfo_flags & SCTP_UNORDERED) {
					strq->next_mid_unordered++;
				} else {
					strq->next_mid_ordered++;
				}
			}
	oh_well:
			if (sp->data) {
				/*
				 * Pull any data to free up the SB and allow
				 * sender to "add more" while we will throw
				 * away :-)
				 */
				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
				ret_sz += sp->length;
				do_wakeup_routine = 1;
				sp->some_taken = 1;
				sctp_m_freem(sp->data);
				sp->data = NULL;
				sp->tail_mbuf = NULL;
				sp->length = 0;
			}
		}
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
	if (do_wakeup_routine) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Lock-order dance: take a refcnt, drop the TCB
			 * lock, grab the socket lock, re-take the TCB lock.
			 * The assoc may have been closed while unlocked.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (ret_sz);
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	return (ret_sz);
}
5024 
5025 /*
5026  * checks to see if the given address, sa, is one that is currently known by
5027  * the kernel note: can't distinguish the same address on multiple interfaces
5028  * and doesn't handle multiple addresses with different zone/scope id's note:
5029  * ifa_ifwithaddr() compares the entire sockaddr struct
5030  */
5031 struct sctp_ifa *
5032 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
5033     int holds_lock)
5034 {
5035 	struct sctp_laddr *laddr;
5036 
5037 	if (holds_lock == 0) {
5038 		SCTP_INP_RLOCK(inp);
5039 	}
5040 
5041 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
5042 		if (laddr->ifa == NULL)
5043 			continue;
5044 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
5045 			continue;
5046 #ifdef INET
5047 		if (addr->sa_family == AF_INET) {
5048 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5049 			    laddr->ifa->address.sin.sin_addr.s_addr) {
5050 				/* found him. */
5051 				if (holds_lock == 0) {
5052 					SCTP_INP_RUNLOCK(inp);
5053 				}
5054 				return (laddr->ifa);
5055 				break;
5056 			}
5057 		}
5058 #endif
5059 #ifdef INET6
5060 		if (addr->sa_family == AF_INET6) {
5061 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5062 			    &laddr->ifa->address.sin6)) {
5063 				/* found him. */
5064 				if (holds_lock == 0) {
5065 					SCTP_INP_RUNLOCK(inp);
5066 				}
5067 				return (laddr->ifa);
5068 				break;
5069 			}
5070 		}
5071 #endif
5072 	}
5073 	if (holds_lock == 0) {
5074 		SCTP_INP_RUNLOCK(inp);
5075 	}
5076 	return (NULL);
5077 }
5078 
/*
 * Compute the address-hash bucket value for a sockaddr.  IPv4 folds the
 * 32-bit address onto itself; IPv6 sums the four 32-bit words and folds
 * the sum.  Unknown families hash to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	uint32_t hash;

	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			uint32_t v4;

			v4 = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
			/* XOR-fold the upper half into the lower half. */
			hash = v4 ^ (v4 >> 16);
			break;
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *a6;
			uint32_t sum;

			a6 = (struct sockaddr_in6 *)addr;
			sum = a6->sin6_addr.s6_addr32[0] +
			    a6->sin6_addr.s6_addr32[1] +
			    a6->sin6_addr.s6_addr32[2] +
			    a6->sin6_addr.s6_addr32[3];
			hash = sum ^ (sum >> 16);
			break;
		}
#endif
	default:
		hash = 0;
		break;
	}
	return (hash);
}
5112 
5113 struct sctp_ifa *
5114 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5115 {
5116 	struct sctp_ifa *sctp_ifap;
5117 	struct sctp_vrf *vrf;
5118 	struct sctp_ifalist *hash_head;
5119 	uint32_t hash_of_addr;
5120 
5121 	if (holds_lock == 0)
5122 		SCTP_IPI_ADDR_RLOCK();
5123 
5124 	vrf = sctp_find_vrf(vrf_id);
5125 	if (vrf == NULL) {
5126 		if (holds_lock == 0)
5127 			SCTP_IPI_ADDR_RUNLOCK();
5128 		return (NULL);
5129 	}
5130 
5131 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5132 
5133 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5134 	if (hash_head == NULL) {
5135 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5136 		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5137 		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5138 		sctp_print_address(addr);
5139 		SCTP_PRINTF("No such bucket for address\n");
5140 		if (holds_lock == 0)
5141 			SCTP_IPI_ADDR_RUNLOCK();
5142 
5143 		return (NULL);
5144 	}
5145 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5146 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5147 			continue;
5148 #ifdef INET
5149 		if (addr->sa_family == AF_INET) {
5150 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5151 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5152 				/* found him. */
5153 				if (holds_lock == 0)
5154 					SCTP_IPI_ADDR_RUNLOCK();
5155 				return (sctp_ifap);
5156 				break;
5157 			}
5158 		}
5159 #endif
5160 #ifdef INET6
5161 		if (addr->sa_family == AF_INET6) {
5162 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5163 			    &sctp_ifap->address.sin6)) {
5164 				/* found him. */
5165 				if (holds_lock == 0)
5166 					SCTP_IPI_ADDR_RUNLOCK();
5167 				return (sctp_ifap);
5168 				break;
5169 			}
5170 		}
5171 #endif
5172 	}
5173 	if (holds_lock == 0)
5174 		SCTP_IPI_ADDR_RUNLOCK();
5175 	return (NULL);
5176 }
5177 
/*
 * Called after the user has pulled data off the socket: decide whether
 * the receive window has opened up enough (by at least rwnd_req bytes
 * since the last report) to justify sending an immediate window-update
 * SACK.  "*freed_so_far" is the byte count consumed since the last call
 * and is reset to 0 here; "hold_rlock" indicates the caller holds the
 * INP read lock, which must be dropped around the SACK send.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Pin the assoc so it cannot be freed underneath us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have we freed enough to make a report worthwhile? */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/*
		 * Window opened by at least rwnd_req: send a window-update
		 * SACK.  Drop the read lock first; sending takes the TCB
		 * lock.
		 */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-take the read lock if we dropped it above. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}

	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5260 
5261 int
5262 sctp_sorecvmsg(struct socket *so,
5263     struct uio *uio,
5264     struct mbuf **mp,
5265     struct sockaddr *from,
5266     int fromlen,
5267     int *msg_flags,
5268     struct sctp_sndrcvinfo *sinfo,
5269     int filling_sinfo)
5270 {
5271 	/*
5272 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5273 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5274 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5275 	 * On the way out we may send out any combination of:
5276 	 * MSG_NOTIFICATION MSG_EOR
5277 	 *
5278 	 */
5279 	struct sctp_inpcb *inp = NULL;
5280 	ssize_t my_len = 0;
5281 	ssize_t cp_len = 0;
5282 	int error = 0;
5283 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5284 	struct mbuf *m = NULL;
5285 	struct sctp_tcb *stcb = NULL;
5286 	int wakeup_read_socket = 0;
5287 	int freecnt_applied = 0;
5288 	int out_flags = 0, in_flags = 0;
5289 	int block_allowed = 1;
5290 	uint32_t freed_so_far = 0;
5291 	ssize_t copied_so_far = 0;
5292 	int in_eeor_mode = 0;
5293 	int no_rcv_needed = 0;
5294 	uint32_t rwnd_req = 0;
5295 	int hold_sblock = 0;
5296 	int hold_rlock = 0;
5297 	ssize_t slen = 0;
5298 	uint32_t held_length = 0;
5299 	int sockbuf_lock = 0;
5300 
5301 	if (uio == NULL) {
5302 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5303 		return (EINVAL);
5304 	}
5305 
5306 	if (msg_flags) {
5307 		in_flags = *msg_flags;
5308 		if (in_flags & MSG_PEEK)
5309 			SCTP_STAT_INCR(sctps_read_peeks);
5310 	} else {
5311 		in_flags = 0;
5312 	}
5313 	slen = uio->uio_resid;
5314 
5315 	/* Pull in and set up our int flags */
5316 	if (in_flags & MSG_OOB) {
5317 		/* Out of band's NOT supported */
5318 		return (EOPNOTSUPP);
5319 	}
5320 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5321 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5322 		return (EINVAL);
5323 	}
5324 	if ((in_flags & (MSG_DONTWAIT
5325 	    | MSG_NBIO
5326 	    )) ||
5327 	    SCTP_SO_IS_NBIO(so)) {
5328 		block_allowed = 0;
5329 	}
5330 	/* setup the endpoint */
5331 	inp = (struct sctp_inpcb *)so->so_pcb;
5332 	if (inp == NULL) {
5333 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5334 		return (EFAULT);
5335 	}
5336 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5337 	/* Must be at least a MTU's worth */
5338 	if (rwnd_req < SCTP_MIN_RWND)
5339 		rwnd_req = SCTP_MIN_RWND;
5340 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5341 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5342 		sctp_misc_ints(SCTP_SORECV_ENTER,
5343 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5344 	}
5345 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5346 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5347 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5348 	}
5349 
5350 
5351 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5352 	if (error) {
5353 		goto release_unlocked;
5354 	}
5355 	sockbuf_lock = 1;
5356 restart:
5357 
5358 
5359 restart_nosblocks:
5360 	if (hold_sblock == 0) {
5361 		SOCKBUF_LOCK(&so->so_rcv);
5362 		hold_sblock = 1;
5363 	}
5364 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5365 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5366 		goto out;
5367 	}
5368 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5369 		if (so->so_error) {
5370 			error = so->so_error;
5371 			if ((in_flags & MSG_PEEK) == 0)
5372 				so->so_error = 0;
5373 			goto out;
5374 		} else {
5375 			if (so->so_rcv.sb_cc == 0) {
5376 				/* indicate EOF */
5377 				error = 0;
5378 				goto out;
5379 			}
5380 		}
5381 	}
5382 	if (so->so_rcv.sb_cc <= held_length) {
5383 		if (so->so_error) {
5384 			error = so->so_error;
5385 			if ((in_flags & MSG_PEEK) == 0) {
5386 				so->so_error = 0;
5387 			}
5388 			goto out;
5389 		}
5390 		if ((so->so_rcv.sb_cc == 0) &&
5391 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5392 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5393 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5394 				/*
5395 				 * For active open side clear flags for
5396 				 * re-use passive open is blocked by
5397 				 * connect.
5398 				 */
5399 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5400 					/*
5401 					 * You were aborted, passive side
5402 					 * always hits here
5403 					 */
5404 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5405 					error = ECONNRESET;
5406 				}
5407 				so->so_state &= ~(SS_ISCONNECTING |
5408 				    SS_ISDISCONNECTING |
5409 				    SS_ISCONFIRMING |
5410 				    SS_ISCONNECTED);
5411 				if (error == 0) {
5412 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5413 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5414 						error = ENOTCONN;
5415 					}
5416 				}
5417 				goto out;
5418 			}
5419 		}
5420 		if (block_allowed) {
5421 			error = sbwait(&so->so_rcv);
5422 			if (error) {
5423 				goto out;
5424 			}
5425 			held_length = 0;
5426 			goto restart_nosblocks;
5427 		} else {
5428 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5429 			error = EWOULDBLOCK;
5430 			goto out;
5431 		}
5432 	}
5433 	if (hold_sblock == 1) {
5434 		SOCKBUF_UNLOCK(&so->so_rcv);
5435 		hold_sblock = 0;
5436 	}
5437 	/* we possibly have data we can read */
5438 	/* sa_ignore FREED_MEMORY */
5439 	control = TAILQ_FIRST(&inp->read_queue);
5440 	if (control == NULL) {
5441 		/*
5442 		 * This could be happening since the appender did the
5443 		 * increment but as not yet did the tailq insert onto the
5444 		 * read_queue
5445 		 */
5446 		if (hold_rlock == 0) {
5447 			SCTP_INP_READ_LOCK(inp);
5448 		}
5449 		control = TAILQ_FIRST(&inp->read_queue);
5450 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5451 #ifdef INVARIANTS
5452 			panic("Huh, its non zero and nothing on control?");
5453 #endif
5454 			so->so_rcv.sb_cc = 0;
5455 		}
5456 		SCTP_INP_READ_UNLOCK(inp);
5457 		hold_rlock = 0;
5458 		goto restart;
5459 	}
5460 
5461 	if ((control->length == 0) &&
5462 	    (control->do_not_ref_stcb)) {
5463 		/*
5464 		 * Clean up code for freeing assoc that left behind a
5465 		 * pdapi.. maybe a peer in EEOR that just closed after
5466 		 * sending and never indicated a EOR.
5467 		 */
5468 		if (hold_rlock == 0) {
5469 			hold_rlock = 1;
5470 			SCTP_INP_READ_LOCK(inp);
5471 		}
5472 		control->held_length = 0;
5473 		if (control->data) {
5474 			/* Hmm there is data here .. fix */
5475 			struct mbuf *m_tmp;
5476 			int cnt = 0;
5477 
5478 			m_tmp = control->data;
5479 			while (m_tmp) {
5480 				cnt += SCTP_BUF_LEN(m_tmp);
5481 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5482 					control->tail_mbuf = m_tmp;
5483 					control->end_added = 1;
5484 				}
5485 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5486 			}
5487 			control->length = cnt;
5488 		} else {
5489 			/* remove it */
5490 			TAILQ_REMOVE(&inp->read_queue, control, next);
5491 			/* Add back any hiddend data */
5492 			sctp_free_remote_addr(control->whoFrom);
5493 			sctp_free_a_readq(stcb, control);
5494 		}
5495 		if (hold_rlock) {
5496 			hold_rlock = 0;
5497 			SCTP_INP_READ_UNLOCK(inp);
5498 		}
5499 		goto restart;
5500 	}
5501 	if ((control->length == 0) &&
5502 	    (control->end_added == 1)) {
5503 		/*
5504 		 * Do we also need to check for (control->pdapi_aborted ==
5505 		 * 1)?
5506 		 */
5507 		if (hold_rlock == 0) {
5508 			hold_rlock = 1;
5509 			SCTP_INP_READ_LOCK(inp);
5510 		}
5511 		TAILQ_REMOVE(&inp->read_queue, control, next);
5512 		if (control->data) {
5513 #ifdef INVARIANTS
5514 			panic("control->data not null but control->length == 0");
5515 #else
5516 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5517 			sctp_m_freem(control->data);
5518 			control->data = NULL;
5519 #endif
5520 		}
5521 		if (control->aux_data) {
5522 			sctp_m_free(control->aux_data);
5523 			control->aux_data = NULL;
5524 		}
5525 #ifdef INVARIANTS
5526 		if (control->on_strm_q) {
5527 			panic("About to free ctl:%p so:%p and its in %d",
5528 			    control, so, control->on_strm_q);
5529 		}
5530 #endif
5531 		sctp_free_remote_addr(control->whoFrom);
5532 		sctp_free_a_readq(stcb, control);
5533 		if (hold_rlock) {
5534 			hold_rlock = 0;
5535 			SCTP_INP_READ_UNLOCK(inp);
5536 		}
5537 		goto restart;
5538 	}
5539 	if (control->length == 0) {
5540 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5541 		    (filling_sinfo)) {
5542 			/* find a more suitable one then this */
5543 			ctl = TAILQ_NEXT(control, next);
5544 			while (ctl) {
5545 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5546 				    (ctl->some_taken ||
5547 				    (ctl->spec_flags & M_NOTIFICATION) ||
5548 				    ((ctl->do_not_ref_stcb == 0) &&
5549 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5550 				    ) {
5551 					/*-
5552 					 * If we have a different TCB next, and there is data
5553 					 * present. If we have already taken some (pdapi), OR we can
5554 					 * ref the tcb and no delivery as started on this stream, we
5555 					 * take it. Note we allow a notification on a different
5556 					 * assoc to be delivered..
5557 					 */
5558 					control = ctl;
5559 					goto found_one;
5560 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5561 					    (ctl->length) &&
5562 					    ((ctl->some_taken) ||
5563 					    ((ctl->do_not_ref_stcb == 0) &&
5564 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5565 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5566 					/*-
5567 					 * If we have the same tcb, and there is data present, and we
5568 					 * have the strm interleave feature present. Then if we have
5569 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5570 					 * not started a delivery for this stream, we can take it.
5571 					 * Note we do NOT allow a notificaiton on the same assoc to
5572 					 * be delivered.
5573 					 */
5574 					control = ctl;
5575 					goto found_one;
5576 				}
5577 				ctl = TAILQ_NEXT(ctl, next);
5578 			}
5579 		}
5580 		/*
5581 		 * if we reach here, not suitable replacement is available
5582 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5583 		 * into the our held count, and its time to sleep again.
5584 		 */
5585 		held_length = so->so_rcv.sb_cc;
5586 		control->held_length = so->so_rcv.sb_cc;
5587 		goto restart;
5588 	}
5589 	/* Clear the held length since there is something to read */
5590 	control->held_length = 0;
5591 found_one:
5592 	/*
5593 	 * If we reach here, control has a some data for us to read off.
5594 	 * Note that stcb COULD be NULL.
5595 	 */
5596 	if (hold_rlock == 0) {
5597 		hold_rlock = 1;
5598 		SCTP_INP_READ_LOCK(inp);
5599 	}
5600 	control->some_taken++;
5601 	stcb = control->stcb;
5602 	if (stcb) {
5603 		if ((control->do_not_ref_stcb == 0) &&
5604 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5605 			if (freecnt_applied == 0)
5606 				stcb = NULL;
5607 		} else if (control->do_not_ref_stcb == 0) {
5608 			/* you can't free it on me please */
5609 			/*
5610 			 * The lock on the socket buffer protects us so the
5611 			 * free code will stop. But since we used the
5612 			 * socketbuf lock and the sender uses the tcb_lock
5613 			 * to increment, we need to use the atomic add to
5614 			 * the refcnt
5615 			 */
5616 			if (freecnt_applied) {
5617 #ifdef INVARIANTS
5618 				panic("refcnt already incremented");
5619 #else
5620 				SCTP_PRINTF("refcnt already incremented?\n");
5621 #endif
5622 			} else {
5623 				atomic_add_int(&stcb->asoc.refcnt, 1);
5624 				freecnt_applied = 1;
5625 			}
5626 			/*
5627 			 * Setup to remember how much we have not yet told
5628 			 * the peer our rwnd has opened up. Note we grab the
5629 			 * value from the tcb from last time. Note too that
5630 			 * sack sending clears this when a sack is sent,
5631 			 * which is fine. Once we hit the rwnd_req, we then
5632 			 * will go to the sctp_user_rcvd() that will not
5633 			 * lock until it KNOWs it MUST send a WUP-SACK.
5634 			 */
5635 			freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast;
5636 			stcb->freed_by_sorcv_sincelast = 0;
5637 		}
5638 	}
5639 	if (stcb &&
5640 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5641 	    control->do_not_ref_stcb == 0) {
5642 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5643 	}
5644 
5645 	/* First lets get off the sinfo and sockaddr info */
5646 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5647 		sinfo->sinfo_stream = control->sinfo_stream;
5648 		sinfo->sinfo_ssn = (uint16_t)control->mid;
5649 		sinfo->sinfo_flags = control->sinfo_flags;
5650 		sinfo->sinfo_ppid = control->sinfo_ppid;
5651 		sinfo->sinfo_context = control->sinfo_context;
5652 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5653 		sinfo->sinfo_tsn = control->sinfo_tsn;
5654 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5655 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5656 		nxt = TAILQ_NEXT(control, next);
5657 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5658 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5659 			struct sctp_extrcvinfo *s_extra;
5660 
5661 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5662 			if ((nxt) &&
5663 			    (nxt->length)) {
5664 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5665 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5666 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5667 				}
5668 				if (nxt->spec_flags & M_NOTIFICATION) {
5669 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5670 				}
5671 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5672 				s_extra->serinfo_next_length = nxt->length;
5673 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5674 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5675 				if (nxt->tail_mbuf != NULL) {
5676 					if (nxt->end_added) {
5677 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5678 					}
5679 				}
5680 			} else {
5681 				/*
5682 				 * we explicitly 0 this, since the memcpy
5683 				 * got some other things beyond the older
5684 				 * sinfo_ that is on the control's structure
5685 				 * :-D
5686 				 */
5687 				nxt = NULL;
5688 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5689 				s_extra->serinfo_next_aid = 0;
5690 				s_extra->serinfo_next_length = 0;
5691 				s_extra->serinfo_next_ppid = 0;
5692 				s_extra->serinfo_next_stream = 0;
5693 			}
5694 		}
5695 		/*
5696 		 * update off the real current cum-ack, if we have an stcb.
5697 		 */
5698 		if ((control->do_not_ref_stcb == 0) && stcb)
5699 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5700 		/*
5701 		 * mask off the high bits, we keep the actual chunk bits in
5702 		 * there.
5703 		 */
5704 		sinfo->sinfo_flags &= 0x00ff;
5705 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5706 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5707 		}
5708 	}
5709 #ifdef SCTP_ASOCLOG_OF_TSNS
5710 	{
5711 		int index, newindex;
5712 		struct sctp_pcbtsn_rlog *entry;
5713 
5714 		do {
5715 			index = inp->readlog_index;
5716 			newindex = index + 1;
5717 			if (newindex >= SCTP_READ_LOG_SIZE) {
5718 				newindex = 0;
5719 			}
5720 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5721 		entry = &inp->readlog[index];
5722 		entry->vtag = control->sinfo_assoc_id;
5723 		entry->strm = control->sinfo_stream;
5724 		entry->seq = (uint16_t)control->mid;
5725 		entry->sz = control->length;
5726 		entry->flgs = control->sinfo_flags;
5727 	}
5728 #endif
5729 	if ((fromlen > 0) && (from != NULL)) {
5730 		union sctp_sockstore store;
5731 		size_t len;
5732 
5733 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5734 #ifdef INET6
5735 		case AF_INET6:
5736 			len = sizeof(struct sockaddr_in6);
5737 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5738 			store.sin6.sin6_port = control->port_from;
5739 			break;
5740 #endif
5741 #ifdef INET
5742 		case AF_INET:
5743 #ifdef INET6
5744 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5745 				len = sizeof(struct sockaddr_in6);
5746 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5747 				    &store.sin6);
5748 				store.sin6.sin6_port = control->port_from;
5749 			} else {
5750 				len = sizeof(struct sockaddr_in);
5751 				store.sin = control->whoFrom->ro._l_addr.sin;
5752 				store.sin.sin_port = control->port_from;
5753 			}
5754 #else
5755 			len = sizeof(struct sockaddr_in);
5756 			store.sin = control->whoFrom->ro._l_addr.sin;
5757 			store.sin.sin_port = control->port_from;
5758 #endif
5759 			break;
5760 #endif
5761 		default:
5762 			len = 0;
5763 			break;
5764 		}
5765 		memcpy(from, &store, min((size_t)fromlen, len));
5766 #ifdef INET6
5767 		{
5768 			struct sockaddr_in6 lsa6, *from6;
5769 
5770 			from6 = (struct sockaddr_in6 *)from;
5771 			sctp_recover_scope_mac(from6, (&lsa6));
5772 		}
5773 #endif
5774 	}
5775 	if (hold_rlock) {
5776 		SCTP_INP_READ_UNLOCK(inp);
5777 		hold_rlock = 0;
5778 	}
5779 	if (hold_sblock) {
5780 		SOCKBUF_UNLOCK(&so->so_rcv);
5781 		hold_sblock = 0;
5782 	}
5783 	/* now copy out what data we can */
5784 	if (mp == NULL) {
5785 		/* copy out each mbuf in the chain up to length */
5786 get_more_data:
5787 		m = control->data;
5788 		while (m) {
5789 			/* Move out all we can */
5790 			cp_len = uio->uio_resid;
5791 			my_len = SCTP_BUF_LEN(m);
5792 			if (cp_len > my_len) {
5793 				/* not enough in this buf */
5794 				cp_len = my_len;
5795 			}
5796 			if (hold_rlock) {
5797 				SCTP_INP_READ_UNLOCK(inp);
5798 				hold_rlock = 0;
5799 			}
5800 			if (cp_len > 0)
5801 				error = uiomove(mtod(m, char *), (int)cp_len, uio);
5802 			/* re-read */
5803 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5804 				goto release;
5805 			}
5806 
5807 			if ((control->do_not_ref_stcb == 0) && stcb &&
5808 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5809 				no_rcv_needed = 1;
5810 			}
5811 			if (error) {
5812 				/* error we are out of here */
5813 				goto release;
5814 			}
5815 			SCTP_INP_READ_LOCK(inp);
5816 			hold_rlock = 1;
5817 			if (cp_len == SCTP_BUF_LEN(m)) {
5818 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5819 				    (control->end_added)) {
5820 					out_flags |= MSG_EOR;
5821 					if ((control->do_not_ref_stcb == 0) &&
5822 					    (control->stcb != NULL) &&
5823 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5824 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5825 				}
5826 				if (control->spec_flags & M_NOTIFICATION) {
5827 					out_flags |= MSG_NOTIFICATION;
5828 				}
5829 				/* we ate up the mbuf */
5830 				if (in_flags & MSG_PEEK) {
5831 					/* just looking */
5832 					m = SCTP_BUF_NEXT(m);
5833 					copied_so_far += cp_len;
5834 				} else {
5835 					/* dispose of the mbuf */
5836 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5837 						sctp_sblog(&so->so_rcv,
5838 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5839 					}
5840 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5841 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5842 						sctp_sblog(&so->so_rcv,
5843 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5844 					}
5845 					copied_so_far += cp_len;
5846 					freed_so_far += (uint32_t)cp_len;
5847 					freed_so_far += MSIZE;
5848 					atomic_subtract_int(&control->length, cp_len);
5849 					control->data = sctp_m_free(m);
5850 					m = control->data;
5851 					/*
5852 					 * been through it all, must hold sb
5853 					 * lock ok to null tail
5854 					 */
5855 					if (control->data == NULL) {
5856 #ifdef INVARIANTS
5857 						if ((control->end_added == 0) ||
5858 						    (TAILQ_NEXT(control, next) == NULL)) {
5859 							/*
5860 							 * If the end is not
5861 							 * added, OR the
5862 							 * next is NOT null
5863 							 * we MUST have the
5864 							 * lock.
5865 							 */
5866 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5867 								panic("Hmm we don't own the lock?");
5868 							}
5869 						}
5870 #endif
5871 						control->tail_mbuf = NULL;
5872 #ifdef INVARIANTS
5873 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5874 							panic("end_added, nothing left and no MSG_EOR");
5875 						}
5876 #endif
5877 					}
5878 				}
5879 			} else {
5880 				/* Do we need to trim the mbuf? */
5881 				if (control->spec_flags & M_NOTIFICATION) {
5882 					out_flags |= MSG_NOTIFICATION;
5883 				}
5884 				if ((in_flags & MSG_PEEK) == 0) {
5885 					SCTP_BUF_RESV_UF(m, cp_len);
5886 					SCTP_BUF_LEN(m) -= (int)cp_len;
5887 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5888 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len);
5889 					}
5890 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5891 					if ((control->do_not_ref_stcb == 0) &&
5892 					    stcb) {
5893 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5894 					}
5895 					copied_so_far += cp_len;
5896 					freed_so_far += (uint32_t)cp_len;
5897 					freed_so_far += MSIZE;
5898 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5899 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5900 						    SCTP_LOG_SBRESULT, 0);
5901 					}
5902 					atomic_subtract_int(&control->length, cp_len);
5903 				} else {
5904 					copied_so_far += cp_len;
5905 				}
5906 			}
5907 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5908 				break;
5909 			}
5910 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5911 			    (control->do_not_ref_stcb == 0) &&
5912 			    (freed_so_far >= rwnd_req)) {
5913 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5914 			}
5915 		}		/* end while(m) */
5916 		/*
5917 		 * At this point we have looked at it all and we either have
5918 		 * a MSG_EOR/or read all the user wants... <OR>
5919 		 * control->length == 0.
5920 		 */
5921 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5922 			/* we are done with this control */
5923 			if (control->length == 0) {
5924 				if (control->data) {
5925 #ifdef INVARIANTS
5926 					panic("control->data not null at read eor?");
5927 #else
5928 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5929 					sctp_m_freem(control->data);
5930 					control->data = NULL;
5931 #endif
5932 				}
5933 		done_with_control:
5934 				if (hold_rlock == 0) {
5935 					SCTP_INP_READ_LOCK(inp);
5936 					hold_rlock = 1;
5937 				}
5938 				TAILQ_REMOVE(&inp->read_queue, control, next);
5939 				/* Add back any hiddend data */
5940 				if (control->held_length) {
5941 					held_length = 0;
5942 					control->held_length = 0;
5943 					wakeup_read_socket = 1;
5944 				}
5945 				if (control->aux_data) {
5946 					sctp_m_free(control->aux_data);
5947 					control->aux_data = NULL;
5948 				}
5949 				no_rcv_needed = control->do_not_ref_stcb;
5950 				sctp_free_remote_addr(control->whoFrom);
5951 				control->data = NULL;
5952 #ifdef INVARIANTS
5953 				if (control->on_strm_q) {
5954 					panic("About to free ctl:%p so:%p and its in %d",
5955 					    control, so, control->on_strm_q);
5956 				}
5957 #endif
5958 				sctp_free_a_readq(stcb, control);
5959 				control = NULL;
5960 				if ((freed_so_far >= rwnd_req) &&
5961 				    (no_rcv_needed == 0))
5962 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5963 
5964 			} else {
5965 				/*
5966 				 * The user did not read all of this
5967 				 * message, turn off the returned MSG_EOR
5968 				 * since we are leaving more behind on the
5969 				 * control to read.
5970 				 */
5971 #ifdef INVARIANTS
5972 				if (control->end_added &&
5973 				    (control->data == NULL) &&
5974 				    (control->tail_mbuf == NULL)) {
5975 					panic("Gak, control->length is corrupt?");
5976 				}
5977 #endif
5978 				no_rcv_needed = control->do_not_ref_stcb;
5979 				out_flags &= ~MSG_EOR;
5980 			}
5981 		}
5982 		if (out_flags & MSG_EOR) {
5983 			goto release;
5984 		}
5985 		if ((uio->uio_resid == 0) ||
5986 		    ((in_eeor_mode) &&
5987 		    (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) {
5988 			goto release;
5989 		}
5990 		/*
5991 		 * If I hit here the receiver wants more and this message is
5992 		 * NOT done (pd-api). So two questions. Can we block? if not
5993 		 * we are done. Did the user NOT set MSG_WAITALL?
5994 		 */
5995 		if (block_allowed == 0) {
5996 			goto release;
5997 		}
5998 		/*
5999 		 * We need to wait for more data a few things: - We don't
6000 		 * sbunlock() so we don't get someone else reading. - We
6001 		 * must be sure to account for the case where what is added
6002 		 * is NOT to our control when we wakeup.
6003 		 */
6004 
6005 		/*
6006 		 * Do we need to tell the transport a rwnd update might be
6007 		 * needed before we go to sleep?
6008 		 */
6009 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6010 		    ((freed_so_far >= rwnd_req) &&
6011 		    (control->do_not_ref_stcb == 0) &&
6012 		    (no_rcv_needed == 0))) {
6013 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6014 		}
6015 wait_some_more:
6016 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
6017 			goto release;
6018 		}
6019 
6020 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
6021 			goto release;
6022 
6023 		if (hold_rlock == 1) {
6024 			SCTP_INP_READ_UNLOCK(inp);
6025 			hold_rlock = 0;
6026 		}
6027 		if (hold_sblock == 0) {
6028 			SOCKBUF_LOCK(&so->so_rcv);
6029 			hold_sblock = 1;
6030 		}
6031 		if ((copied_so_far) && (control->length == 0) &&
6032 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
6033 			goto release;
6034 		}
6035 		if (so->so_rcv.sb_cc <= control->held_length) {
6036 			error = sbwait(&so->so_rcv);
6037 			if (error) {
6038 				goto release;
6039 			}
6040 			control->held_length = 0;
6041 		}
6042 		if (hold_sblock) {
6043 			SOCKBUF_UNLOCK(&so->so_rcv);
6044 			hold_sblock = 0;
6045 		}
6046 		if (control->length == 0) {
6047 			/* still nothing here */
6048 			if (control->end_added == 1) {
6049 				/* he aborted, or is done i.e.did a shutdown */
6050 				out_flags |= MSG_EOR;
6051 				if (control->pdapi_aborted) {
6052 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6053 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6054 
6055 					out_flags |= MSG_TRUNC;
6056 				} else {
6057 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6058 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6059 				}
6060 				goto done_with_control;
6061 			}
6062 			if (so->so_rcv.sb_cc > held_length) {
6063 				control->held_length = so->so_rcv.sb_cc;
6064 				held_length = 0;
6065 			}
6066 			goto wait_some_more;
6067 		} else if (control->data == NULL) {
6068 			/*
6069 			 * we must re-sync since data is probably being
6070 			 * added
6071 			 */
6072 			SCTP_INP_READ_LOCK(inp);
6073 			if ((control->length > 0) && (control->data == NULL)) {
6074 				/*
6075 				 * big trouble.. we have the lock and its
6076 				 * corrupt?
6077 				 */
6078 #ifdef INVARIANTS
6079 				panic("Impossible data==NULL length !=0");
6080 #endif
6081 				out_flags |= MSG_EOR;
6082 				out_flags |= MSG_TRUNC;
6083 				control->length = 0;
6084 				SCTP_INP_READ_UNLOCK(inp);
6085 				goto done_with_control;
6086 			}
6087 			SCTP_INP_READ_UNLOCK(inp);
6088 			/* We will fall around to get more data */
6089 		}
6090 		goto get_more_data;
6091 	} else {
6092 		/*-
6093 		 * Give caller back the mbuf chain,
6094 		 * store in uio_resid the length
6095 		 */
6096 		wakeup_read_socket = 0;
6097 		if ((control->end_added == 0) ||
6098 		    (TAILQ_NEXT(control, next) == NULL)) {
6099 			/* Need to get rlock */
6100 			if (hold_rlock == 0) {
6101 				SCTP_INP_READ_LOCK(inp);
6102 				hold_rlock = 1;
6103 			}
6104 		}
6105 		if (control->end_added) {
6106 			out_flags |= MSG_EOR;
6107 			if ((control->do_not_ref_stcb == 0) &&
6108 			    (control->stcb != NULL) &&
6109 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6110 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6111 		}
6112 		if (control->spec_flags & M_NOTIFICATION) {
6113 			out_flags |= MSG_NOTIFICATION;
6114 		}
6115 		uio->uio_resid = control->length;
6116 		*mp = control->data;
6117 		m = control->data;
6118 		while (m) {
6119 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6120 				sctp_sblog(&so->so_rcv,
6121 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6122 			}
6123 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6124 			freed_so_far += (uint32_t)SCTP_BUF_LEN(m);
6125 			freed_so_far += MSIZE;
6126 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6127 				sctp_sblog(&so->so_rcv,
6128 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6129 			}
6130 			m = SCTP_BUF_NEXT(m);
6131 		}
6132 		control->data = control->tail_mbuf = NULL;
6133 		control->length = 0;
6134 		if (out_flags & MSG_EOR) {
6135 			/* Done with this control */
6136 			goto done_with_control;
6137 		}
6138 	}
6139 release:
6140 	if (hold_rlock == 1) {
6141 		SCTP_INP_READ_UNLOCK(inp);
6142 		hold_rlock = 0;
6143 	}
6144 	if (hold_sblock == 1) {
6145 		SOCKBUF_UNLOCK(&so->so_rcv);
6146 		hold_sblock = 0;
6147 	}
6148 
6149 	sbunlock(&so->so_rcv);
6150 	sockbuf_lock = 0;
6151 
6152 release_unlocked:
6153 	if (hold_sblock) {
6154 		SOCKBUF_UNLOCK(&so->so_rcv);
6155 		hold_sblock = 0;
6156 	}
6157 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6158 		if ((freed_so_far >= rwnd_req) &&
6159 		    (control && (control->do_not_ref_stcb == 0)) &&
6160 		    (no_rcv_needed == 0))
6161 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6162 	}
6163 out:
6164 	if (msg_flags) {
6165 		*msg_flags = out_flags;
6166 	}
6167 	if (((out_flags & MSG_EOR) == 0) &&
6168 	    ((in_flags & MSG_PEEK) == 0) &&
6169 	    (sinfo) &&
6170 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6171 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6172 		struct sctp_extrcvinfo *s_extra;
6173 
6174 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6175 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6176 	}
6177 	if (hold_rlock == 1) {
6178 		SCTP_INP_READ_UNLOCK(inp);
6179 	}
6180 	if (hold_sblock) {
6181 		SOCKBUF_UNLOCK(&so->so_rcv);
6182 	}
6183 	if (sockbuf_lock) {
6184 		sbunlock(&so->so_rcv);
6185 	}
6186 
6187 	if (freecnt_applied) {
6188 		/*
6189 		 * The lock on the socket buffer protects us so the free
6190 		 * code will stop. But since we used the socketbuf lock and
6191 		 * the sender uses the tcb_lock to increment, we need to use
6192 		 * the atomic add to the refcnt.
6193 		 */
6194 		if (stcb == NULL) {
6195 #ifdef INVARIANTS
6196 			panic("stcb for refcnt has gone NULL?");
6197 			goto stage_left;
6198 #else
6199 			goto stage_left;
6200 #endif
6201 		}
6202 		/* Save the value back for next time */
6203 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6204 		atomic_add_int(&stcb->asoc.refcnt, -1);
6205 	}
6206 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6207 		if (stcb) {
6208 			sctp_misc_ints(SCTP_SORECV_DONE,
6209 			    freed_so_far,
6210 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6211 			    stcb->asoc.my_rwnd,
6212 			    so->so_rcv.sb_cc);
6213 		} else {
6214 			sctp_misc_ints(SCTP_SORECV_DONE,
6215 			    freed_so_far,
6216 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6217 			    0,
6218 			    so->so_rcv.sb_cc);
6219 		}
6220 	}
6221 stage_left:
6222 	if (wakeup_read_socket) {
6223 		sctp_sorwakeup(inp, so);
6224 	}
6225 	return (error);
6226 }
6227 
6228 
6229 #ifdef SCTP_MBUF_LOGGING
6230 struct mbuf *
6231 sctp_m_free(struct mbuf *m)
6232 {
6233 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6234 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6235 	}
6236 	return (m_free(m));
6237 }
6238 
6239 void
6240 sctp_m_freem(struct mbuf *mb)
6241 {
6242 	while (mb != NULL)
6243 		mb = sctp_m_free(mb);
6244 }
6245 
6246 #endif
6247 
/*
 * Request a peer-set-primary for a local address: queue a work item so the
 * address work-queue iterator asks the peers of every association holding
 * this address to make it the primary path.
 *
 * Returns 0 on success, EADDRNOTAVAIL if the sockaddr does not match a
 * local interface address in the given VRF, or ENOMEM if the work-queue
 * entry cannot be allocated.
 */
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	/* Resolve the sockaddr to its interface-address entry for this VRF. */
	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Account for the new entry and initialize the wi structure. */
	SCTP_INCR_LADDR_COUNT();
	memset(wi, 0, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* The work-queue entry holds its own reference on the ifa. */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	/* Kick the address work-queue timer so the entry gets processed. */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	SCTP_WQ_ADDR_UNLOCK();
	return (0);
}
6294 
6295 
/*
 * Protocol-specific soreceive() entry point for SCTP.  Thin wrapper around
 * sctp_sorecvmsg() that marshals the optional source address (psa),
 * per-message sndrcv info returned as control data (controlp), and the
 * MSG_* flags word used by the generic socket receive path.  Returns 0 or
 * an errno value.
 */
int
sctp_soreceive(struct socket *so,
    struct sockaddr **psa,
    struct uio *uio,
    struct mbuf **mp0,
    struct mbuf **controlp,
    int *flagsp)
{
	int error, fromlen;
	uint8_t sockbuf[256];	/* scratch space for the peer sockaddr */
	struct sockaddr *from;
	struct sctp_extrcvinfo sinfo;
	int filling_sinfo = 1;	/* gather sndrcv info unless turned off below */
	int flags;
	struct sctp_inpcb *inp;

	inp = (struct sctp_inpcb *)so->so_pcb;
	/* pickup the assoc we are reading from */
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	/*
	 * Only collect sndrcv info when at least one of the relevant socket
	 * options is enabled AND the caller supplied somewhere to return it.
	 */
	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
	    (controlp == NULL)) {
		/* user does not want the sndrcv ctl */
		filling_sinfo = 0;
	}
	if (psa) {
		from = (struct sockaddr *)sockbuf;
		fromlen = sizeof(sockbuf);
		/* sa_len == 0 signals "no address filled in" on return. */
		from->sa_len = 0;
	} else {
		from = NULL;
		fromlen = 0;
	}

	if (filling_sinfo) {
		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
	}
	if (flagsp != NULL) {
		flags = *flagsp;
	} else {
		flags = 0;
	}
	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags,
	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
	if (flagsp != NULL) {
		*flagsp = flags;
	}
	if (controlp != NULL) {
		/* copy back the sinfo in a CMSG format */
		if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) {
			*controlp = sctp_build_ctl_nchunk(inp,
			    (struct sctp_sndrcvinfo *)&sinfo);
		} else {
			/* Notifications carry no sndrcv control data. */
			*controlp = NULL;
		}
	}
	if (psa) {
		/* copy back the address info */
		if (from && from->sa_len) {
			*psa = sodupsockaddr(from, M_NOWAIT);
		} else {
			*psa = NULL;
		}
	}
	return (error);
}
6366 
6367 
6368 
6369 
6370 
/*
 * Add the totaddr packed sockaddrs in 'addr' to an existing association as
 * confirmed remote addresses (connectx() helper path).
 *
 * Returns the number of addresses added.  On failure *error is set (EINVAL
 * for an unusable address, ENOBUFS when sctp_add_remote_addr() fails) and
 * the association has already been destroyed via sctp_free_assoc(), so the
 * caller must not touch stcb afterwards.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;
#ifdef INET
	struct sockaddr_in *sin;
#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;
#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* Reject wildcard, broadcast and multicast peers. */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* Reject unspecified and multicast peers. */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * NOTE(review): an unknown family leaves 'incr'
			 * unchanged (0 on the first iteration), so 'sa' is
			 * not advanced and the same bytes are re-examined on
			 * the next pass; presumably callers have already
			 * validated the families (sctp_connectx_helper_find()
			 * rejects unknown families) — confirm before relying
			 * on this path.
			 */
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6451 
6452 int
6453 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6454     unsigned int totaddr,
6455     unsigned int *num_v4, unsigned int *num_v6,
6456     unsigned int limit)
6457 {
6458 	struct sockaddr *sa;
6459 	struct sctp_tcb *stcb;
6460 	unsigned int incr, at, i;
6461 
6462 	at = 0;
6463 	sa = addr;
6464 	*num_v6 = *num_v4 = 0;
6465 	/* account and validate addresses */
6466 	if (totaddr == 0) {
6467 		return (EINVAL);
6468 	}
6469 	for (i = 0; i < totaddr; i++) {
6470 		if (at + sizeof(struct sockaddr) > limit) {
6471 			return (EINVAL);
6472 		}
6473 		switch (sa->sa_family) {
6474 #ifdef INET
6475 		case AF_INET:
6476 			incr = (unsigned int)sizeof(struct sockaddr_in);
6477 			if (sa->sa_len != incr) {
6478 				return (EINVAL);
6479 			}
6480 			(*num_v4) += 1;
6481 			break;
6482 #endif
6483 #ifdef INET6
6484 		case AF_INET6:
6485 			{
6486 				struct sockaddr_in6 *sin6;
6487 
6488 				sin6 = (struct sockaddr_in6 *)sa;
6489 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6490 					/* Must be non-mapped for connectx */
6491 					return (EINVAL);
6492 				}
6493 				incr = (unsigned int)sizeof(struct sockaddr_in6);
6494 				if (sa->sa_len != incr) {
6495 					return (EINVAL);
6496 				}
6497 				(*num_v6) += 1;
6498 				break;
6499 			}
6500 #endif
6501 		default:
6502 			return (EINVAL);
6503 		}
6504 		if ((at + incr) > limit) {
6505 			return (EINVAL);
6506 		}
6507 		SCTP_INP_INCR_REF(inp);
6508 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6509 		if (stcb != NULL) {
6510 			SCTP_TCB_UNLOCK(stcb);
6511 			return (EALREADY);
6512 		} else {
6513 			SCTP_INP_DECR_REF(inp);
6514 		}
6515 		at += incr;
6516 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6517 	}
6518 	return (0);
6519 }
6520 
6521 /*
6522  * sctp_bindx(ADD) for one address.
6523  * assumes all arguments are valid/checked by caller.
6524  */
/*
 * sctp_bindx(ADD) for one address.
 * assumes all arguments are valid/checked by caller.
 *
 * On failure *error is set to an errno value; on success it is left
 * untouched.  For an already-bound endpoint with assoc_id == 0, the
 * address is added endpoint-wide via sctp_addr_mgmt_ep_sa().
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;	/* scratch for a v4-mapped v6 address */
#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;

#endif
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Convert the mapped address and bind the plain v4 form. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* Endpoint not yet bound at all: this becomes the first bind. */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		/*
		 * NOTE(review): lsin is used for the port check even when
		 * addr_touse is AF_INET6 — presumably sin_port and sin6_port
		 * occupy the same offset in both sockaddr layouts; confirm
		 * against the struct definitions.
		 */
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* Is some other endpoint already bound to this address/port? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* Clear the port before the endpoint-wide add. */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6650 
6651 /*
6652  * sctp_bindx(DELETE) for one address.
6653  * assumes all arguments are valid/checked by caller.
6654  */
/*
 * sctp_bindx(DELETE) for one address.
 * assumes all arguments are valid/checked by caller.
 *
 * On failure *error is set to an errno value.  For assoc_id == 0 the
 * address is removed endpoint-wide via sctp_addr_mgmt_ep_sa().
 */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;	/* scratch for a v4-mapped v6 address */
#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;
#endif

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Convert the mapped address and delete the plain v4 form. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6737 
6738 /*
6739  * returns the valid local address count for an assoc, taking into account
6740  * all scoping rules
6741  */
/*
 * returns the valid local address count for an assoc, taking into account
 * all scoping rules
 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope;
#if defined(INET)
	int ipv4_local_scope, ipv4_addr_legal;
#endif
#if defined (INET6)
	int local_scope, site_scope, ipv6_addr_legal;
#endif
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Snapshot the association's address-scope settings. */
	loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}

	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				/* Loopback addresses are out of scope. */
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = &sctp_ifa->address.sin;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						/* Skip addresses the jail may not use. */
						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin->sin_addr) != 0) {
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							/* RFC1918 space excluded by scope. */
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = &sctp_ifa->address.sin6;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						/* Skip addresses the jail may not use. */
						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin6->sin6_addr) != 0) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link-local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case: only count the endpoint's explicitly
		 * bound addresses that the association may still use.
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6880 
6881 #if defined(SCTP_LOCAL_TRACE_BUF)
6882 
/*
 * Append one entry to the global circular trace buffer
 * SCTP_BASE_SYSCTL(sctp_log).  'subsys' tags the subsystem and a-f are six
 * free-form parameter words; 'str' is accepted for API symmetry but unused.
 * Safe to call from concurrent contexts: the slot is reserved lock-free.
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/*
	 * Reserve the next slot without a lock: snapshot the index, compute
	 * its successor (wrapping past SCTP_MAX_LOGGING_SIZE back to 1), and
	 * publish it with a compare-and-swap.  On contention the CAS fails
	 * and we retry with a fresh snapshot.
	 */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* A wrapped reservation maps onto entry 0. */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	/*
	 * Fill the reserved entry.  The writes are not atomic with the
	 * reservation, so a reader racing a wrap may see a torn entry;
	 * acceptable for a debug trace facility.
	 */
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6908 
6909 #endif
/*
 * Receive hook for SCTP-over-UDP tunneled packets (RFC 6951), installed
 * via udp_set_kernel_tunneling().  'm' holds the received packet starting
 * at the IP header; 'off' is the offset of the UDP header.  The UDP header
 * is stripped out, the IP length field adjusted, and the result handed to
 * the normal IPv4/IPv6 SCTP input path together with the UDP source port.
 * 'm' is consumed in all cases (either passed on or freed).
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
    const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
{
	struct ip *iph;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	/* Source port of the encapsulating UDP datagram, network byte order. */
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed; m_pullup freed sp, we still free m below. */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain: re-attach sp after the IP header. */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	/*
	 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP
	 * checksum and it was valid. Since CSUM_DATA_VALID ==
	 * CSUM_SCTP_VALID this would imply that the HW also verified the
	 * SCTP checksum. Therefore, clear the bit.
	 */
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
	    m->m_pkthdr.len,
	    if_name(m->m_pkthdr.rcvif),
	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
	iph = mtod(m, struct ip *);
	/* Dispatch on the IP version of the outer (now UDP-less) packet. */
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/* Shrink the IP total length by the removed UDP header. */
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = mtod(m, struct ip6_hdr *);
		/* Shrink the IPv6 payload length by the removed UDP header. */
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}
6992 
6993 #ifdef INET
/*
 * ICMP error handler for SCTP-over-UDP tunneled packets (IPv4).  'vip'
 * points to the inner (quoted) IP header embedded in the ICMP message.
 * Validates that the error plausibly refers to one of our associations
 * (UDP ports, verification tag or INIT initiate-tag) before forwarding it
 * to sctp_notify(); otherwise the error is silently dropped.
 */
static void
sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
{
	struct ip *outer_ip, *inner_ip;
	struct sctphdr *sh;
	struct icmp *icmp;
	struct udphdr *udp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_init_chunk *ch;
	struct sockaddr_in src, dst;
	uint8_t type, code;

	/*
	 * Recover the ICMP header and the outer IP header, which sit
	 * immediately in front of the quoted inner IP header.
	 */
	inner_ip = (struct ip *)vip;
	icmp = (struct icmp *)((caddr_t)inner_ip -
	    (sizeof(struct icmp) - sizeof(struct ip)));
	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
	/*
	 * Require outer IP header + 8 bytes of ICMP + inner IP header +
	 * UDP header + the first 8 bytes of the SCTP common header
	 * (ports and verification tag) to be present.
	 */
	if (ntohs(outer_ip->ip_len) <
	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
		return;
	}
	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
	sh = (struct sctphdr *)(udp + 1);
	memset(&src, 0, sizeof(struct sockaddr_in));
	src.sin_family = AF_INET;
	src.sin_len = sizeof(struct sockaddr_in);
	src.sin_port = sh->src_port;
	src.sin_addr = inner_ip->ip_src;
	memset(&dst, 0, sizeof(struct sockaddr_in));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(struct sockaddr_in);
	dst.sin_port = sh->dest_port;
	dst.sin_addr = inner_ip->ip_dst;
	/*
	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
	 * holds our local endpoint address. Thus we reverse the dst and the
	 * src in the lookup.
	 */
	inp = NULL;
	net = NULL;
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	    (struct sockaddr *)&src,
	    &inp, &net, 1,
	    SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/* Check the UDP port numbers */
		if ((udp->uh_dport != net->port) ||
		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh->v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider packets
			 * reflecting the verification tag.
			 */
			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			/*
			 * A zero v_tag can only be legitimate for an INIT;
			 * require enough quoted bytes to see the chunk header
			 * and the initiate tag (20 bytes past the UDP header).
			 */
			if (ntohs(outer_ip->ip_len) >=
			    sizeof(struct ip) +
			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
				/*
				 * In this case we can check if we got an
				 * INIT chunk and if the initiate tag
				 * matches.
				 */
				ch = (struct sctp_init_chunk *)(sh + 1);
				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = icmp->icmp_type;
		code = icmp->icmp_code;
		/*
		 * A port unreachable refers to the UDP encapsulation port;
		 * report it upward as protocol unreachable.
		 */
		if ((type == ICMP_UNREACH) &&
		    (code == ICMP_UNREACH_PORT)) {
			code = ICMP_UNREACH_PROTOCOL;
		}
		sctp_notify(inp, stcb, net, type, code,
		    ntohs(inner_ip->ip_len),
		    (uint32_t)ntohs(icmp->icmp_nextmtu));
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce ref-count taken by the lookup */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
	return;
}
7101 #endif
7102 
7103 #ifdef INET6
/*
 * ICMPv6 error handler for SCTP-over-UDP tunneled packets.  'd' is a
 * struct ip6ctlparam describing the ICMPv6 message and the quoted packet.
 * Mirrors sctp_recv_icmp_tunneled_packet(): validate UDP ports and the
 * verification tag (or INIT initiate tag) before calling sctp6_notify();
 * otherwise drop the error silently.  Unlike the IPv4 variant, the quoted
 * headers are extracted with m_copydata() instead of direct pointers.
 */
static void
sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
{
	struct ip6ctlparam *ip6cp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctphdr sh;
	struct udphdr udp;
	struct sockaddr_in6 src, dst;
	uint8_t type, code;

	ip6cp = (struct ip6ctlparam *)d;
	/*
	 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid.
	 */
	if (ip6cp->ip6c_m == NULL) {
		return;
	}
	/*
	 * Check if we can safely examine the ports and the verification tag
	 * of the SCTP common header.
	 */
	if (ip6cp->ip6c_m->m_pkthdr.len <
	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
		return;
	}
	/* Copy out the UDP header. */
	memset(&udp, 0, sizeof(struct udphdr));
	m_copydata(ip6cp->ip6c_m,
	    ip6cp->ip6c_off,
	    sizeof(struct udphdr),
	    (caddr_t)&udp);
	/* Copy out the port numbers and the verification tag. */
	memset(&sh, 0, sizeof(struct sctphdr));
	m_copydata(ip6cp->ip6c_m,
	    ip6cp->ip6c_off + sizeof(struct udphdr),
	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
	    (caddr_t)&sh);
	/* Build 'src' (our local address) from the quoted packet's source. */
	memset(&src, 0, sizeof(struct sockaddr_in6));
	src.sin6_family = AF_INET6;
	src.sin6_len = sizeof(struct sockaddr_in6);
	src.sin6_port = sh.src_port;
	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
	/* Build 'dst' (the peer address) from the quoted packet's destination. */
	memset(&dst, 0, sizeof(struct sockaddr_in6));
	dst.sin6_family = AF_INET6;
	dst.sin6_len = sizeof(struct sockaddr_in6);
	dst.sin6_port = sh.dest_port;
	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
	inp = NULL;
	net = NULL;
	/* dst/src are swapped: we look up by (peer, local). */
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	    (struct sockaddr *)&src,
	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/* Check the UDP port numbers */
		if ((udp.uh_dport != net->port) ||
		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh.v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider packets
			 * reflecting the verification tag.
			 */
			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			/*
			 * A zero v_tag is only legitimate for an INIT;
			 * require enough quoted bytes to read the chunk type
			 * and the initiate tag.
			 */
			if (ip6cp->ip6c_m->m_pkthdr.len >=
			    ip6cp->ip6c_off + sizeof(struct udphdr) +
			    sizeof(struct sctphdr) +
			    sizeof(struct sctp_chunkhdr) +
			    offsetof(struct sctp_init, a_rwnd)) {
				/*
				 * In this case we can check if we got an
				 * INIT chunk and if the initiate tag
				 * matches.
				 */
				uint32_t initiate_tag;
				uint8_t chunk_type;

				m_copydata(ip6cp->ip6c_m,
				    ip6cp->ip6c_off +
				    sizeof(struct udphdr) +
				    sizeof(struct sctphdr),
				    sizeof(uint8_t),
				    (caddr_t)&chunk_type);
				m_copydata(ip6cp->ip6c_m,
				    ip6cp->ip6c_off +
				    sizeof(struct udphdr) +
				    sizeof(struct sctphdr) +
				    sizeof(struct sctp_chunkhdr),
				    sizeof(uint32_t),
				    (caddr_t)&initiate_tag);
				if ((chunk_type != SCTP_INITIATION) ||
				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = ip6cp->ip6c_icmp6->icmp6_type;
		code = ip6cp->ip6c_icmp6->icmp6_code;
		/*
		 * A port unreachable refers to the UDP encapsulation port;
		 * report it upward as an unrecognized next header.
		 */
		if ((type == ICMP6_DST_UNREACH) &&
		    (code == ICMP6_DST_UNREACH_NOPORT)) {
			type = ICMP6_PARAM_PROB;
			code = ICMP6_PARAMPROB_NEXTHEADER;
		}
		sctp6_notify(inp, stcb, net, type, code,
		    ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce inp's ref-count taken by the lookup */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
}
7242 #endif
7243 
/*
 * Tear down the kernel sockets used for SCTP-over-UDP tunneling, if any.
 * The sysctl caller is assumed to hold sctp_sysctl_info_lock() for
 * writing, so no other CPU can race the pointer updates below.
 */
void
sctp_over_udp_stop(void)
{
#ifdef INET
	struct socket *so4;

	so4 = SCTP_BASE_INFO(udp4_tun_socket);
	if (so4 != NULL) {
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
		soclose(so4);
	}
#endif
#ifdef INET6
	struct socket *so6;

	so6 = SCTP_BASE_INFO(udp6_tun_socket);
	if (so6 != NULL) {
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
		soclose(so6);
	}
#endif
}
7264 
/*
 * Bring up the kernel sockets used for SCTP-over-UDP tunneling (RFC 6951)
 * on the port configured via the sctp_udp_tunneling_port sysctl.  For each
 * enabled address family: create a UDP socket, install the packet and ICMP
 * tunneling hooks, then bind to the port.  On any failure everything set
 * up so far is torn down via sctp_over_udp_stop() and the error returned.
 * Returns 0 on success, EINVAL if no port is set, EALREADY if running.
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	/* Byte order is irrelevant for a zero test. */
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}
7351 
7352 /*
7353  * sctp_min_mtu ()returns the minimum of all non-zero arguments.
7354  * If all arguments are zero, zero is returned.
7355  */
7356 uint32_t
7357 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
7358 {
7359 	if (mtu1 > 0) {
7360 		if (mtu2 > 0) {
7361 			if (mtu3 > 0) {
7362 				return (min(mtu1, min(mtu2, mtu3)));
7363 			} else {
7364 				return (min(mtu1, mtu2));
7365 			}
7366 		} else {
7367 			if (mtu3 > 0) {
7368 				return (min(mtu1, mtu3));
7369 			} else {
7370 				return (mtu1);
7371 			}
7372 		}
7373 	} else {
7374 		if (mtu2 > 0) {
7375 			if (mtu3 > 0) {
7376 				return (min(mtu2, mtu3));
7377 			} else {
7378 				return (mtu2);
7379 			}
7380 		} else {
7381 			return (mtu3);
7382 		}
7383 	}
7384 }
7385 
7386 void
7387 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
7388 {
7389 	struct in_conninfo inc;
7390 
7391 	memset(&inc, 0, sizeof(struct in_conninfo));
7392 	inc.inc_fibnum = fibnum;
7393 	switch (addr->sa.sa_family) {
7394 #ifdef INET
7395 	case AF_INET:
7396 		inc.inc_faddr = addr->sin.sin_addr;
7397 		break;
7398 #endif
7399 #ifdef INET6
7400 	case AF_INET6:
7401 		inc.inc_flags |= INC_ISIPV6;
7402 		inc.inc6_faddr = addr->sin6.sin6_addr;
7403 		break;
7404 #endif
7405 	default:
7406 		return;
7407 	}
7408 	tcp_hc_updatemtu(&inc, (u_long)mtu);
7409 }
7410 
7411 uint32_t
7412 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
7413 {
7414 	struct in_conninfo inc;
7415 
7416 	memset(&inc, 0, sizeof(struct in_conninfo));
7417 	inc.inc_fibnum = fibnum;
7418 	switch (addr->sa.sa_family) {
7419 #ifdef INET
7420 	case AF_INET:
7421 		inc.inc_faddr = addr->sin.sin_addr;
7422 		break;
7423 #endif
7424 #ifdef INET6
7425 	case AF_INET6:
7426 		inc.inc_flags |= INC_ISIPV6;
7427 		inc.inc6_faddr = addr->sin6.sin6_addr;
7428 		break;
7429 #endif
7430 	default:
7431 		return (0);
7432 	}
7433 	return ((uint32_t)tcp_hc_getmtu(&inc));
7434 }
7435 
/*
 * Replace the base state of the association with 'new_state', preserving
 * the substate flag bits.  'new_state' must not contain substate bits
 * (enforced by KASSERT).  Entering any of the three shutdown states
 * implicitly clears the SHUTDOWN_PENDING substate.  With KDTRACE_HOOKS,
 * the state-change DTrace probe fires unless the base state is unchanged
 * or the transition is the initial EMPTY -> INUSE.
 */
void
sctp_set_state(struct sctp_tcb *stcb, int new_state)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
	    ("sctp_set_state: Can't set substate (new_state = %x)",
	    new_state));
	/* Keep the substate bits, swap in the new base state. */
	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* A shutdown is no longer merely pending. */
		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
	}
#if defined(KDTRACE_HOOKS)
	if (((old_state & SCTP_STATE_MASK) != new_state) &&
	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
	    (new_state == SCTP_STATE_INUSE))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}
7460 
/*
 * OR substate flag bits into the association state.  'substate' must not
 * contain base-state bits (enforced by KASSERT).  With KDTRACE_HOOKS, the
 * state-change DTrace probe fires when ABOUT_TO_BE_FREED or
 * SHUTDOWN_PENDING transitions from clear to set.
 */
void
sctp_add_substate(struct sctp_tcb *stcb, int substate)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((substate & SCTP_STATE_MASK) == 0,
	    ("sctp_add_substate: Can't set state (substate = %x)",
	    substate));
	stcb->asoc.state |= substate;
#if defined(KDTRACE_HOOKS)
	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
	    ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
	    ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}
7481