xref: /freebsd/sys/netinet/sctputil.c (revision 19fe57fdb4fd2c18a37f2a972617c8769609cdb8)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #include <netinet6/sctp6_var.h>
45 #endif
46 #include <netinet/sctp_header.h>
47 #include <netinet/sctp_output.h>
48 #include <netinet/sctp_uio.h>
49 #include <netinet/sctp_timer.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_auth.h>
52 #include <netinet/sctp_asconf.h>
53 #include <netinet/sctp_bsd_addr.h>
54 #include <netinet/sctp_kdtrace.h>
55 #if defined(INET6) || defined(INET)
56 #include <netinet/tcp_var.h>
57 #endif
58 #include <netinet/udp.h>
59 #include <netinet/udp_var.h>
60 #include <sys/proc.h>
61 #ifdef INET6
62 #include <netinet/icmp6.h>
63 #endif
64 
65 
66 #ifndef KTR_SCTP
67 #define KTR_SCTP KTR_SUBSYS
68 #endif
69 
70 extern const struct sctp_cc_functions sctp_cc_functions[];
71 extern const struct sctp_ss_functions sctp_ss_functions[];
72 
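/*
 * Note on the trace helpers that follow: each one fills in an
 * event-specific view of the union inside struct sctp_cwnd_log (x.sb,
 * x.close, x.rto, ...) and then hands x.misc.log1 through x.misc.log4
 * to SCTP_CTR6().  The views overlay the same storage, so this emits
 * the first four 32-bit words of whatever event was just recorded.
 * A rough sketch of the layout assumed here (names abridged; see
 * sctp_uio.h for the real definition):
 *
 *	struct sctp_cwnd_log {
 *		union {
 *			struct sctp_sblog sb;
 *			struct sctp_misc_info misc;	(log1 .. log4)
 *			...				(one view per event)
 *		} x;
 *	};
 */
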
73 void
74 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
75 {
76 #if defined(SCTP_LOCAL_TRACE_BUF)
77 	struct sctp_cwnd_log sctp_clog;
78 
79 	sctp_clog.x.sb.stcb = stcb;
80 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
81 	if (stcb)
82 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
83 	else
84 		sctp_clog.x.sb.stcb_sbcc = 0;
85 	sctp_clog.x.sb.incr = incr;
86 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
87 	    SCTP_LOG_EVENT_SB,
88 	    from,
89 	    sctp_clog.x.misc.log1,
90 	    sctp_clog.x.misc.log2,
91 	    sctp_clog.x.misc.log3,
92 	    sctp_clog.x.misc.log4);
93 #endif
94 }
95 
96 void
97 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
98 {
99 #if defined(SCTP_LOCAL_TRACE_BUF)
100 	struct sctp_cwnd_log sctp_clog;
101 
102 	sctp_clog.x.close.inp = (void *)inp;
103 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
104 	if (stcb) {
105 		sctp_clog.x.close.stcb = (void *)stcb;
106 		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
107 	} else {
108 		sctp_clog.x.close.stcb = 0;
109 		sctp_clog.x.close.state = 0;
110 	}
111 	sctp_clog.x.close.loc = loc;
112 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
113 	    SCTP_LOG_EVENT_CLOSE,
114 	    0,
115 	    sctp_clog.x.misc.log1,
116 	    sctp_clog.x.misc.log2,
117 	    sctp_clog.x.misc.log3,
118 	    sctp_clog.x.misc.log4);
119 #endif
120 }
121 
122 void
123 rto_logging(struct sctp_nets *net, int from)
124 {
125 #if defined(SCTP_LOCAL_TRACE_BUF)
126 	struct sctp_cwnd_log sctp_clog;
127 
128 	memset(&sctp_clog, 0, sizeof(sctp_clog));
129 	sctp_clog.x.rto.net = (void *)net;
130 	sctp_clog.x.rto.rtt = net->rtt / 1000;
131 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
132 	    SCTP_LOG_EVENT_RTT,
133 	    from,
134 	    sctp_clog.x.misc.log1,
135 	    sctp_clog.x.misc.log2,
136 	    sctp_clog.x.misc.log3,
137 	    sctp_clog.x.misc.log4);
138 #endif
139 }
140 
141 void
142 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
143 {
144 #if defined(SCTP_LOCAL_TRACE_BUF)
145 	struct sctp_cwnd_log sctp_clog;
146 
147 	sctp_clog.x.strlog.stcb = stcb;
148 	sctp_clog.x.strlog.n_tsn = tsn;
149 	sctp_clog.x.strlog.n_sseq = sseq;
150 	sctp_clog.x.strlog.e_tsn = 0;
151 	sctp_clog.x.strlog.e_sseq = 0;
152 	sctp_clog.x.strlog.strm = stream;
153 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
154 	    SCTP_LOG_EVENT_STRM,
155 	    from,
156 	    sctp_clog.x.misc.log1,
157 	    sctp_clog.x.misc.log2,
158 	    sctp_clog.x.misc.log3,
159 	    sctp_clog.x.misc.log4);
160 #endif
161 }
162 
163 void
164 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
165 {
166 #if defined(SCTP_LOCAL_TRACE_BUF)
167 	struct sctp_cwnd_log sctp_clog;
168 
169 	sctp_clog.x.nagle.stcb = (void *)stcb;
170 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
171 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
172 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
173 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
174 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
175 	    SCTP_LOG_EVENT_NAGLE,
176 	    action,
177 	    sctp_clog.x.misc.log1,
178 	    sctp_clog.x.misc.log2,
179 	    sctp_clog.x.misc.log3,
180 	    sctp_clog.x.misc.log4);
181 #endif
182 }
183 
184 void
185 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
186 {
187 #if defined(SCTP_LOCAL_TRACE_BUF)
188 	struct sctp_cwnd_log sctp_clog;
189 
190 	sctp_clog.x.sack.cumack = cumack;
191 	sctp_clog.x.sack.oldcumack = old_cumack;
192 	sctp_clog.x.sack.tsn = tsn;
193 	sctp_clog.x.sack.numGaps = gaps;
194 	sctp_clog.x.sack.numDups = dups;
195 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
196 	    SCTP_LOG_EVENT_SACK,
197 	    from,
198 	    sctp_clog.x.misc.log1,
199 	    sctp_clog.x.misc.log2,
200 	    sctp_clog.x.misc.log3,
201 	    sctp_clog.x.misc.log4);
202 #endif
203 }
204 
205 void
206 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
207 {
208 #if defined(SCTP_LOCAL_TRACE_BUF)
209 	struct sctp_cwnd_log sctp_clog;
210 
211 	memset(&sctp_clog, 0, sizeof(sctp_clog));
212 	sctp_clog.x.map.base = map;
213 	sctp_clog.x.map.cum = cum;
214 	sctp_clog.x.map.high = high;
215 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
216 	    SCTP_LOG_EVENT_MAP,
217 	    from,
218 	    sctp_clog.x.misc.log1,
219 	    sctp_clog.x.misc.log2,
220 	    sctp_clog.x.misc.log3,
221 	    sctp_clog.x.misc.log4);
222 #endif
223 }
224 
225 void
226 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
227 {
228 #if defined(SCTP_LOCAL_TRACE_BUF)
229 	struct sctp_cwnd_log sctp_clog;
230 
231 	memset(&sctp_clog, 0, sizeof(sctp_clog));
232 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
233 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
234 	sctp_clog.x.fr.tsn = tsn;
235 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
236 	    SCTP_LOG_EVENT_FR,
237 	    from,
238 	    sctp_clog.x.misc.log1,
239 	    sctp_clog.x.misc.log2,
240 	    sctp_clog.x.misc.log3,
241 	    sctp_clog.x.misc.log4);
242 #endif
243 }
244 
245 #ifdef SCTP_MBUF_LOGGING
246 void
247 sctp_log_mb(struct mbuf *m, int from)
248 {
249 #if defined(SCTP_LOCAL_TRACE_BUF)
250 	struct sctp_cwnd_log sctp_clog;
251 
252 	sctp_clog.x.mb.mp = m;
253 	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
254 	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
255 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
256 	if (SCTP_BUF_IS_EXTENDED(m)) {
257 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
258 		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
259 	} else {
260 		sctp_clog.x.mb.ext = 0;
261 		sctp_clog.x.mb.refcnt = 0;
262 	}
263 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
264 	    SCTP_LOG_EVENT_MBUF,
265 	    from,
266 	    sctp_clog.x.misc.log1,
267 	    sctp_clog.x.misc.log2,
268 	    sctp_clog.x.misc.log3,
269 	    sctp_clog.x.misc.log4);
270 #endif
271 }
272 
273 void
274 sctp_log_mbc(struct mbuf *m, int from)
275 {
276 	struct mbuf *mat;
277 
278 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
279 		sctp_log_mb(mat, from);
280 	}
281 }
282 #endif
283 
284 void
285 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
286 {
287 #if defined(SCTP_LOCAL_TRACE_BUF)
288 	struct sctp_cwnd_log sctp_clog;
289 
290 	if (control == NULL) {
291 		SCTP_PRINTF("Gak log of NULL?\n");
292 		return;
293 	}
294 	sctp_clog.x.strlog.stcb = control->stcb;
295 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
296 	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
297 	sctp_clog.x.strlog.strm = control->sinfo_stream;
298 	if (poschk != NULL) {
299 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
300 		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
301 	} else {
302 		sctp_clog.x.strlog.e_tsn = 0;
303 		sctp_clog.x.strlog.e_sseq = 0;
304 	}
305 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
306 	    SCTP_LOG_EVENT_STRM,
307 	    from,
308 	    sctp_clog.x.misc.log1,
309 	    sctp_clog.x.misc.log2,
310 	    sctp_clog.x.misc.log3,
311 	    sctp_clog.x.misc.log4);
312 #endif
313 }
314 
315 void
316 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
317 {
318 #if defined(SCTP_LOCAL_TRACE_BUF)
319 	struct sctp_cwnd_log sctp_clog;
320 
321 	sctp_clog.x.cwnd.net = net;
322 	if (stcb->asoc.send_queue_cnt > 255)
323 		sctp_clog.x.cwnd.cnt_in_send = 255;
324 	else
325 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
326 	if (stcb->asoc.stream_queue_cnt > 255)
327 		sctp_clog.x.cwnd.cnt_in_str = 255;
328 	else
329 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
330 
331 	if (net) {
332 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
333 		sctp_clog.x.cwnd.inflight = net->flight_size;
334 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
335 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
336 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
337 	}
338 	if (SCTP_CWNDLOG_PRESEND == from) {
339 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
340 	}
341 	sctp_clog.x.cwnd.cwnd_augment = augment;
342 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
343 	    SCTP_LOG_EVENT_CWND,
344 	    from,
345 	    sctp_clog.x.misc.log1,
346 	    sctp_clog.x.misc.log2,
347 	    sctp_clog.x.misc.log3,
348 	    sctp_clog.x.misc.log4);
349 #endif
350 }
351 
352 void
353 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
354 {
355 #if defined(SCTP_LOCAL_TRACE_BUF)
356 	struct sctp_cwnd_log sctp_clog;
357 
358 	memset(&sctp_clog, 0, sizeof(sctp_clog));
359 	if (inp) {
360 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
361 
362 	} else {
363 		sctp_clog.x.lock.sock = (void *)NULL;
364 	}
365 	sctp_clog.x.lock.inp = (void *)inp;
366 	if (stcb) {
367 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
368 	} else {
369 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
370 	}
371 	if (inp) {
372 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
373 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
374 	} else {
375 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
376 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
377 	}
378 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
379 	if (inp && (inp->sctp_socket)) {
380 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
381 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
382 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
383 	} else {
384 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
385 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
386 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
387 	}
388 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
389 	    SCTP_LOG_LOCK_EVENT,
390 	    from,
391 	    sctp_clog.x.misc.log1,
392 	    sctp_clog.x.misc.log2,
393 	    sctp_clog.x.misc.log3,
394 	    sctp_clog.x.misc.log4);
395 #endif
396 }
397 
398 void
399 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
400 {
401 #if defined(SCTP_LOCAL_TRACE_BUF)
402 	struct sctp_cwnd_log sctp_clog;
403 
404 	memset(&sctp_clog, 0, sizeof(sctp_clog));
405 	sctp_clog.x.cwnd.net = net;
406 	sctp_clog.x.cwnd.cwnd_new_value = error;
407 	sctp_clog.x.cwnd.inflight = net->flight_size;
408 	sctp_clog.x.cwnd.cwnd_augment = burst;
409 	if (stcb->asoc.send_queue_cnt > 255)
410 		sctp_clog.x.cwnd.cnt_in_send = 255;
411 	else
412 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
413 	if (stcb->asoc.stream_queue_cnt > 255)
414 		sctp_clog.x.cwnd.cnt_in_str = 255;
415 	else
416 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
417 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
418 	    SCTP_LOG_EVENT_MAXBURST,
419 	    from,
420 	    sctp_clog.x.misc.log1,
421 	    sctp_clog.x.misc.log2,
422 	    sctp_clog.x.misc.log3,
423 	    sctp_clog.x.misc.log4);
424 #endif
425 }
426 
427 void
428 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
429 {
430 #if defined(SCTP_LOCAL_TRACE_BUF)
431 	struct sctp_cwnd_log sctp_clog;
432 
433 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
434 	sctp_clog.x.rwnd.send_size = snd_size;
435 	sctp_clog.x.rwnd.overhead = overhead;
436 	sctp_clog.x.rwnd.new_rwnd = 0;
437 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
438 	    SCTP_LOG_EVENT_RWND,
439 	    from,
440 	    sctp_clog.x.misc.log1,
441 	    sctp_clog.x.misc.log2,
442 	    sctp_clog.x.misc.log3,
443 	    sctp_clog.x.misc.log4);
444 #endif
445 }
446 
447 void
448 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
449 {
450 #if defined(SCTP_LOCAL_TRACE_BUF)
451 	struct sctp_cwnd_log sctp_clog;
452 
453 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
454 	sctp_clog.x.rwnd.send_size = flight_size;
455 	sctp_clog.x.rwnd.overhead = overhead;
456 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
457 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
458 	    SCTP_LOG_EVENT_RWND,
459 	    from,
460 	    sctp_clog.x.misc.log1,
461 	    sctp_clog.x.misc.log2,
462 	    sctp_clog.x.misc.log3,
463 	    sctp_clog.x.misc.log4);
464 #endif
465 }
466 
467 #ifdef SCTP_MBCNT_LOGGING
468 static void
469 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
470 {
471 #if defined(SCTP_LOCAL_TRACE_BUF)
472 	struct sctp_cwnd_log sctp_clog;
473 
474 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
475 	sctp_clog.x.mbcnt.size_change = book;
476 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
477 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
478 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
479 	    SCTP_LOG_EVENT_MBCNT,
480 	    from,
481 	    sctp_clog.x.misc.log1,
482 	    sctp_clog.x.misc.log2,
483 	    sctp_clog.x.misc.log3,
484 	    sctp_clog.x.misc.log4);
485 #endif
486 }
487 #endif
488 
489 void
490 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
491 {
492 #if defined(SCTP_LOCAL_TRACE_BUF)
493 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
494 	    SCTP_LOG_MISC_EVENT,
495 	    from,
496 	    a, b, c, d);
497 #endif
498 }
499 
500 void
501 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
502 {
503 #if defined(SCTP_LOCAL_TRACE_BUF)
504 	struct sctp_cwnd_log sctp_clog;
505 
506 	sctp_clog.x.wake.stcb = (void *)stcb;
507 	sctp_clog.x.wake.wake_cnt = wake_cnt;
508 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
509 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
510 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
511 
512 	if (stcb->asoc.stream_queue_cnt < 0xff)
513 		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
514 	else
515 		sctp_clog.x.wake.stream_qcnt = 0xff;
516 
517 	if (stcb->asoc.chunks_on_out_queue < 0xff)
518 		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
519 	else
520 		sctp_clog.x.wake.chunks_on_oque = 0xff;
521 
522 	sctp_clog.x.wake.sctpflags = 0;
523 	/* set in the deferred mode stuff */
524 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
525 		sctp_clog.x.wake.sctpflags |= 1;
526 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
527 		sctp_clog.x.wake.sctpflags |= 2;
528 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
529 		sctp_clog.x.wake.sctpflags |= 4;
530 	/* what about the sb */
531 	if (stcb->sctp_socket) {
532 		struct socket *so = stcb->sctp_socket;
533 
534 		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
535 	} else {
536 		sctp_clog.x.wake.sbflags = 0xff;
537 	}
538 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
539 	    SCTP_LOG_EVENT_WAKE,
540 	    from,
541 	    sctp_clog.x.misc.log1,
542 	    sctp_clog.x.misc.log2,
543 	    sctp_clog.x.misc.log3,
544 	    sctp_clog.x.misc.log4);
545 #endif
546 }
547 
548 void
549 sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
550 {
551 #if defined(SCTP_LOCAL_TRACE_BUF)
552 	struct sctp_cwnd_log sctp_clog;
553 
554 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
555 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
556 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
557 	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
558 	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
559 	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
560 	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
561 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
562 	    SCTP_LOG_EVENT_BLOCK,
563 	    from,
564 	    sctp_clog.x.misc.log1,
565 	    sctp_clog.x.misc.log2,
566 	    sctp_clog.x.misc.log3,
567 	    sctp_clog.x.misc.log4);
568 #endif
569 }
570 
571 int
572 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
573 {
574 	/* May need to fix this if ktrdump does not work */
575 	return (0);
576 }
577 
578 #ifdef SCTP_AUDITING_ENABLED
579 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
580 static int sctp_audit_indx = 0;
581 
582 static
583 void
584 sctp_print_audit_report(void)
585 {
586 	int i;
587 	int cnt;
588 
589 	cnt = 0;
590 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
591 		if ((sctp_audit_data[i][0] == 0xe0) &&
592 		    (sctp_audit_data[i][1] == 0x01)) {
593 			cnt = 0;
594 			SCTP_PRINTF("\n");
595 		} else if (sctp_audit_data[i][0] == 0xf0) {
596 			cnt = 0;
597 			SCTP_PRINTF("\n");
598 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
599 		    (sctp_audit_data[i][1] == 0x01)) {
600 			SCTP_PRINTF("\n");
601 			cnt = 0;
602 		}
603 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
604 		    (uint32_t)sctp_audit_data[i][1]);
605 		cnt++;
606 		if ((cnt % 14) == 0)
607 			SCTP_PRINTF("\n");
608 	}
609 	for (i = 0; i < sctp_audit_indx; i++) {
610 		if ((sctp_audit_data[i][0] == 0xe0) &&
611 		    (sctp_audit_data[i][1] == 0x01)) {
612 			cnt = 0;
613 			SCTP_PRINTF("\n");
614 		} else if (sctp_audit_data[i][0] == 0xf0) {
615 			cnt = 0;
616 			SCTP_PRINTF("\n");
617 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
618 		    (sctp_audit_data[i][1] == 0x01)) {
619 			SCTP_PRINTF("\n");
620 			cnt = 0;
621 		}
622 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
623 		    (uint32_t)sctp_audit_data[i][1]);
624 		cnt++;
625 		if ((cnt % 14) == 0)
626 			SCTP_PRINTF("\n");
627 	}
628 	SCTP_PRINTF("\n");
629 }
630 
631 void
632 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
633     struct sctp_nets *net)
634 {
635 	int resend_cnt, tot_out, rep, tot_book_cnt;
636 	struct sctp_nets *lnet;
637 	struct sctp_tmit_chunk *chk;
638 
639 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
640 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
641 	sctp_audit_indx++;
642 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
643 		sctp_audit_indx = 0;
644 	}
645 	if (inp == NULL) {
646 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
647 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
648 		sctp_audit_indx++;
649 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
650 			sctp_audit_indx = 0;
651 		}
652 		return;
653 	}
654 	if (stcb == NULL) {
655 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
656 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
657 		sctp_audit_indx++;
658 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
659 			sctp_audit_indx = 0;
660 		}
661 		return;
662 	}
663 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
664 	sctp_audit_data[sctp_audit_indx][1] =
665 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
666 	sctp_audit_indx++;
667 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
668 		sctp_audit_indx = 0;
669 	}
670 	rep = 0;
671 	tot_book_cnt = 0;
672 	resend_cnt = tot_out = 0;
673 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
674 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
675 			resend_cnt++;
676 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
677 			tot_out += chk->book_size;
678 			tot_book_cnt++;
679 		}
680 	}
681 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
682 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
683 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
684 		sctp_audit_indx++;
685 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
686 			sctp_audit_indx = 0;
687 		}
688 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
689 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
690 		rep = 1;
691 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
692 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
693 		sctp_audit_data[sctp_audit_indx][1] =
694 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
695 		sctp_audit_indx++;
696 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
697 			sctp_audit_indx = 0;
698 		}
699 	}
700 	if (tot_out != stcb->asoc.total_flight) {
701 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
702 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
703 		sctp_audit_indx++;
704 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
705 			sctp_audit_indx = 0;
706 		}
707 		rep = 1;
708 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
709 		    (int)stcb->asoc.total_flight);
710 		stcb->asoc.total_flight = tot_out;
711 	}
712 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
713 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
714 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
715 		sctp_audit_indx++;
716 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
717 			sctp_audit_indx = 0;
718 		}
719 		rep = 1;
720 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
721 
722 		stcb->asoc.total_flight_count = tot_book_cnt;
723 	}
724 	tot_out = 0;
725 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
726 		tot_out += lnet->flight_size;
727 	}
728 	if (tot_out != stcb->asoc.total_flight) {
729 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
730 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
731 		sctp_audit_indx++;
732 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
733 			sctp_audit_indx = 0;
734 		}
735 		rep = 1;
736 		SCTP_PRINTF("real flight:%d net total was %d\n",
737 		    stcb->asoc.total_flight, tot_out);
738 		/* now corrective action */
739 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
740 
741 			tot_out = 0;
742 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
743 				if ((chk->whoTo == lnet) &&
744 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
745 					tot_out += chk->book_size;
746 				}
747 			}
748 			if (lnet->flight_size != tot_out) {
749 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
750 				    (void *)lnet, lnet->flight_size,
751 				    tot_out);
752 				lnet->flight_size = tot_out;
753 			}
754 		}
755 	}
756 	if (rep) {
757 		sctp_print_audit_report();
758 	}
759 }
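
/*
 * In short, sctp_auditing() above re-derives sent_queue_retran_cnt,
 * total_flight, total_flight_count and every net's flight_size from
 * the sent queue itself; any mismatch is recorded in the audit ring,
 * reported via sctp_print_audit_report(), and the cached counter is
 * overwritten with the recomputed value.
 */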
760 
761 void
762 sctp_audit_log(uint8_t ev, uint8_t fd)
763 {
764 
765 	sctp_audit_data[sctp_audit_indx][0] = ev;
766 	sctp_audit_data[sctp_audit_indx][1] = fd;
767 	sctp_audit_indx++;
768 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
769 		sctp_audit_indx = 0;
770 	}
771 }
772 
773 #endif
774 
775 /*
776  * sctp_stop_timers_for_shutdown() should be called
777  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
778  * state to make sure that all timers are stopped.
779  */
780 void
781 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
782 {
783 	struct sctp_inpcb *inp;
784 	struct sctp_nets *net;
785 
786 	inp = stcb->sctp_ep;
787 
788 	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
789 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
790 	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
791 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
792 	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
793 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
794 	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
795 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
796 	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
797 		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
798 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
799 		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
800 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
801 	}
802 }
803 
804 void
805 sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
806 {
807 	struct sctp_inpcb *inp;
808 	struct sctp_nets *net;
809 
810 	inp = stcb->sctp_ep;
811 	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
812 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
813 	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
814 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
815 	if (stop_assoc_kill_timer) {
816 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
817 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
818 	}
819 	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
820 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
821 	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
822 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
823 	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
824 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
825 	/* Mobility adaptation */
826 	sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
827 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
828 	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
829 		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
830 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
831 		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
832 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
833 		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
834 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
835 		sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
836 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
837 		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
838 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
839 		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
840 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
841 		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
842 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
843 	}
844 }
845 
846 /*
847  * A list of sizes based on typical MTUs, used only if the next hop's size is
848  * not returned. These values MUST be multiples of 4 and MUST be ordered.
849  */
850 static uint32_t sctp_mtu_sizes[] = {
851 	68,
852 	296,
853 	508,
854 	512,
855 	544,
856 	576,
857 	1004,
858 	1492,
859 	1500,
860 	1536,
861 	2000,
862 	2048,
863 	4352,
864 	4464,
865 	8166,
866 	17912,
867 	32000,
868 	65532
869 };
870 
871 /*
872  * Return the largest MTU in sctp_mtu_sizes smaller than val.
873  * If val is smaller than or equal to the minimum, just return the largest
874  * multiple of 4 smaller than or equal to val.
875  * Ensure that the result is a multiple of 4.
876  */
877 uint32_t
878 sctp_get_prev_mtu(uint32_t val)
879 {
880 	uint32_t i;
881 
882 	val &= 0xfffffffc;
883 	if (val <= sctp_mtu_sizes[0]) {
884 		return (val);
885 	}
886 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
887 		if (val <= sctp_mtu_sizes[i]) {
888 			break;
889 		}
890 	}
891 	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
892 	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
893 	return (sctp_mtu_sizes[i - 1]);
894 }
895 
896 /*
897  * Return the smallest MTU in sctp_mtu_sizes larger than val.
898  * If val is larger than or equal to the maximum, just return the largest
899  * multiple of 4 smaller than or equal to val.
900  * Ensure that the result is a multiple of 4.
901  */
902 uint32_t
903 sctp_get_next_mtu(uint32_t val)
904 {
905 	/* select another MTU that is just bigger than this one */
906 	uint32_t i;
907 
908 	val &= 0xfffffffc;
909 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
910 		if (val < sctp_mtu_sizes[i]) {
911 			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
912 			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
913 			return (sctp_mtu_sizes[i]);
914 		}
915 	}
916 	return (val);
917 }
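
/*
 * Worked examples for the two lookups above, using the entries of
 * sctp_mtu_sizes:
 *
 *	sctp_get_prev_mtu(1500)  == 1492
 *	sctp_get_next_mtu(1492)  == 1500
 *	sctp_get_next_mtu(65533) == 65532	(above the table, so just
 *						 rounded down to a multiple
 *						 of 4)
 */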
918 
919 void
920 sctp_fill_random_store(struct sctp_pcb *m)
921 {
922 	/*
923 	 * Here we use MD5/SHA-1 to hash our good random numbers together with
924 	 * our counter. The result becomes our new good random numbers and we
925 	 * then set up to give these out. Note that we do no locking to protect
926 	 * this. That is ok, since if competing callers get here concurrently we
927 	 * just get more gobbledygook in the random store, which is what we
928 	 * want. There is a danger that two callers will draw the same random
929 	 * numbers, but that's ok too since that is random as well :->
930 	 */
931 	m->store_at = 0;
932 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
933 	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
934 	    sizeof(m->random_counter), (uint8_t *)m->random_store);
935 	m->random_counter++;
936 }
937 
938 uint32_t
939 sctp_select_initial_TSN(struct sctp_pcb *inp)
940 {
941 	/*
942 	 * A true implementation should use a random selection process to get
943 	 * the initial stream sequence number, using RFC 1750 as a good
944 	 * guideline.
945 	 */
946 	uint32_t x, *xp;
947 	uint8_t *p;
948 	int store_at, new_store;
949 
950 	if (inp->initial_sequence_debug != 0) {
951 		uint32_t ret;
952 
953 		ret = inp->initial_sequence_debug;
954 		inp->initial_sequence_debug++;
955 		return (ret);
956 	}
957 retry:
958 	store_at = inp->store_at;
959 	new_store = store_at + sizeof(uint32_t);
960 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
961 		new_store = 0;
962 	}
963 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
964 		goto retry;
965 	}
966 	if (new_store == 0) {
967 		/* Refill the random store */
968 		sctp_fill_random_store(inp);
969 	}
970 	p = &inp->random_store[store_at];
971 	xp = (uint32_t *)p;
972 	x = *xp;
973 	return (x);
974 }
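
/*
 * The selection above is a lock-free consumer of random_store[]: each
 * call claims a four-byte slot by advancing store_at with a
 * compare-and-swap (retrying if another thread won the race), wraps to
 * the start once the next slot could no longer hold a full uint32_t
 * (the SCTP_SIGNATURE_SIZE - 3 check), and regenerates the pool via
 * sctp_fill_random_store() on every wrap-around.
 */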
975 
976 uint32_t
977 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
978 {
979 	uint32_t x;
980 	struct timeval now;
981 
982 	if (check) {
983 		(void)SCTP_GETTIME_TIMEVAL(&now);
984 	}
985 	for (;;) {
986 		x = sctp_select_initial_TSN(&inp->sctp_ep);
987 		if (x == 0) {
988 			/* we never use 0 */
989 			continue;
990 		}
991 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
992 			break;
993 		}
994 	}
995 	return (x);
996 }
997 
998 int32_t
999 sctp_map_assoc_state(int kernel_state)
1000 {
1001 	int32_t user_state;
1002 
1003 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
1004 		user_state = SCTP_CLOSED;
1005 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
1006 		user_state = SCTP_SHUTDOWN_PENDING;
1007 	} else {
1008 		switch (kernel_state & SCTP_STATE_MASK) {
1009 		case SCTP_STATE_EMPTY:
1010 			user_state = SCTP_CLOSED;
1011 			break;
1012 		case SCTP_STATE_INUSE:
1013 			user_state = SCTP_CLOSED;
1014 			break;
1015 		case SCTP_STATE_COOKIE_WAIT:
1016 			user_state = SCTP_COOKIE_WAIT;
1017 			break;
1018 		case SCTP_STATE_COOKIE_ECHOED:
1019 			user_state = SCTP_COOKIE_ECHOED;
1020 			break;
1021 		case SCTP_STATE_OPEN:
1022 			user_state = SCTP_ESTABLISHED;
1023 			break;
1024 		case SCTP_STATE_SHUTDOWN_SENT:
1025 			user_state = SCTP_SHUTDOWN_SENT;
1026 			break;
1027 		case SCTP_STATE_SHUTDOWN_RECEIVED:
1028 			user_state = SCTP_SHUTDOWN_RECEIVED;
1029 			break;
1030 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
1031 			user_state = SCTP_SHUTDOWN_ACK_SENT;
1032 			break;
1033 		default:
1034 			user_state = SCTP_CLOSED;
1035 			break;
1036 		}
1037 	}
1038 	return (user_state);
1039 }
1040 
1041 int
1042 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1043     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
1044 {
1045 	struct sctp_association *asoc;
1046 
1047 	/*
1048 	 * Anything set to zero is taken care of by the allocation routine's
1049 	 * bzero
1050 	 */
1051 
1052 	/*
1053 	 * Up front, select what scoping to apply on addresses I tell my peer.
1054 	 * Not sure what to do with these right now, we will need to come up
1055 	 * with a way to set them. We may need to pass them through from the
1056 	 * caller in the sctp_aloc_assoc() function.
1057 	 */
1058 	int i;
1059 #if defined(SCTP_DETAILED_STR_STATS)
1060 	int j;
1061 #endif
1062 
1063 	asoc = &stcb->asoc;
1064 	/* init all variables to a known value. */
1065 	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
1066 	asoc->max_burst = inp->sctp_ep.max_burst;
1067 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
1068 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
1069 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
1070 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
1071 	asoc->ecn_supported = inp->ecn_supported;
1072 	asoc->prsctp_supported = inp->prsctp_supported;
1073 	asoc->idata_supported = inp->idata_supported;
1074 	asoc->auth_supported = inp->auth_supported;
1075 	asoc->asconf_supported = inp->asconf_supported;
1076 	asoc->reconfig_supported = inp->reconfig_supported;
1077 	asoc->nrsack_supported = inp->nrsack_supported;
1078 	asoc->pktdrop_supported = inp->pktdrop_supported;
1079 	asoc->idata_supported = inp->idata_supported;
1080 	asoc->sctp_cmt_pf = (uint8_t)0;
1081 	asoc->sctp_frag_point = inp->sctp_frag_point;
1082 	asoc->sctp_features = inp->sctp_features;
1083 	asoc->default_dscp = inp->sctp_ep.default_dscp;
1084 	asoc->max_cwnd = inp->max_cwnd;
1085 #ifdef INET6
1086 	if (inp->sctp_ep.default_flowlabel) {
1087 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
1088 	} else {
1089 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
1090 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
1091 			asoc->default_flowlabel &= 0x000fffff;
1092 			asoc->default_flowlabel |= 0x80000000;
1093 		} else {
1094 			asoc->default_flowlabel = 0;
1095 		}
1096 	}
1097 #endif
1098 	asoc->sb_send_resv = 0;
1099 	if (override_tag) {
1100 		asoc->my_vtag = override_tag;
1101 	} else {
1102 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1103 	}
1104 	/* Get the nonce tags */
1105 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1106 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1107 	asoc->vrf_id = vrf_id;
1108 
1109 #ifdef SCTP_ASOCLOG_OF_TSNS
1110 	asoc->tsn_in_at = 0;
1111 	asoc->tsn_out_at = 0;
1112 	asoc->tsn_in_wrapped = 0;
1113 	asoc->tsn_out_wrapped = 0;
1114 	asoc->cumack_log_at = 0;
1115 	asoc->cumack_log_atsnt = 0;
1116 #endif
1117 #ifdef SCTP_FS_SPEC_LOG
1118 	asoc->fs_index = 0;
1119 #endif
1120 	asoc->refcnt = 0;
1121 	asoc->assoc_up_sent = 0;
1122 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1123 	    sctp_select_initial_TSN(&inp->sctp_ep);
1124 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1125 	/* we are optimistic here */
1126 	asoc->peer_supports_nat = 0;
1127 	asoc->sent_queue_retran_cnt = 0;
1128 
1129 	/* for CMT */
1130 	asoc->last_net_cmt_send_started = NULL;
1131 
1132 	/* This will need to be adjusted */
1133 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1134 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1135 	asoc->asconf_seq_in = asoc->last_acked_seq;
1136 
1137 	/* here we are different, we hold the next one we expect */
1138 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1139 
1140 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1141 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1142 
1143 	asoc->default_mtu = inp->sctp_ep.default_mtu;
1144 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1145 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1146 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1147 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1148 	asoc->free_chunk_cnt = 0;
1149 
1150 	asoc->iam_blocking = 0;
1151 	asoc->context = inp->sctp_context;
1152 	asoc->local_strreset_support = inp->local_strreset_support;
1153 	asoc->def_send = inp->def_send;
1154 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1155 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1156 	asoc->pr_sctp_cnt = 0;
1157 	asoc->total_output_queue_size = 0;
1158 
1159 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1160 		asoc->scope.ipv6_addr_legal = 1;
1161 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1162 			asoc->scope.ipv4_addr_legal = 1;
1163 		} else {
1164 			asoc->scope.ipv4_addr_legal = 0;
1165 		}
1166 	} else {
1167 		asoc->scope.ipv6_addr_legal = 0;
1168 		asoc->scope.ipv4_addr_legal = 1;
1169 	}
1170 
1171 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1172 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1173 
1174 	asoc->smallest_mtu = inp->sctp_frag_point;
1175 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1176 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1177 
1178 	asoc->stream_locked_on = 0;
1179 	asoc->ecn_echo_cnt_onq = 0;
1180 	asoc->stream_locked = 0;
1181 
1182 	asoc->send_sack = 1;
1183 
1184 	LIST_INIT(&asoc->sctp_restricted_addrs);
1185 
1186 	TAILQ_INIT(&asoc->nets);
1187 	TAILQ_INIT(&asoc->pending_reply_queue);
1188 	TAILQ_INIT(&asoc->asconf_ack_sent);
1189 	/* Setup to fill the hb random cache at first HB */
1189 	/* Set up to fill the hb random cache at the first HB */
1191 
1192 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1193 
1194 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1195 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1196 
1197 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1198 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1199 
1200 	/*
1201 	 * Now the stream parameters, here we allocate space for all streams
1202 	 * that we request by default.
1203 	 */
1204 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1205 	    o_strms;
1206 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1207 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1208 	    SCTP_M_STRMO);
1209 	if (asoc->strmout == NULL) {
1210 		/* big trouble no memory */
1211 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1212 		return (ENOMEM);
1213 	}
1214 	for (i = 0; i < asoc->streamoutcnt; i++) {
1215 		/*
1216 		 * The inbound side must be set to 0xffff. Also NOTE that when
1217 		 * we get the INIT-ACK back (for the INIT sender) we MUST
1218 		 * reduce the count (streamoutcnt), but first check whether we
1219 		 * sent on any of the upper streams that were dropped (if some
1220 		 * were). Those that were dropped must be reported to the
1221 		 * upper layer as failed to send.
1222 		 */
1223 		asoc->strmout[i].next_mid_ordered = 0;
1224 		asoc->strmout[i].next_mid_unordered = 0;
1225 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1226 		asoc->strmout[i].chunks_on_queues = 0;
1227 #if defined(SCTP_DETAILED_STR_STATS)
1228 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1229 			asoc->strmout[i].abandoned_sent[j] = 0;
1230 			asoc->strmout[i].abandoned_unsent[j] = 0;
1231 		}
1232 #else
1233 		asoc->strmout[i].abandoned_sent[0] = 0;
1234 		asoc->strmout[i].abandoned_unsent[0] = 0;
1235 #endif
1236 		asoc->strmout[i].sid = i;
1237 		asoc->strmout[i].last_msg_incomplete = 0;
1238 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1239 		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1240 	}
1241 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1242 
1243 	/* Now the mapping array */
1244 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1245 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1246 	    SCTP_M_MAP);
1247 	if (asoc->mapping_array == NULL) {
1248 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1249 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1250 		return (ENOMEM);
1251 	}
1252 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1253 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1254 	    SCTP_M_MAP);
1255 	if (asoc->nr_mapping_array == NULL) {
1256 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1257 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1258 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1259 		return (ENOMEM);
1260 	}
1261 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1262 
1263 	/* Now the init of the other outqueues */
1264 	TAILQ_INIT(&asoc->free_chunks);
1265 	TAILQ_INIT(&asoc->control_send_queue);
1266 	TAILQ_INIT(&asoc->asconf_send_queue);
1267 	TAILQ_INIT(&asoc->send_queue);
1268 	TAILQ_INIT(&asoc->sent_queue);
1269 	TAILQ_INIT(&asoc->resetHead);
1270 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1271 	TAILQ_INIT(&asoc->asconf_queue);
1272 	/* authentication fields */
1273 	asoc->authinfo.random = NULL;
1274 	asoc->authinfo.active_keyid = 0;
1275 	asoc->authinfo.assoc_key = NULL;
1276 	asoc->authinfo.assoc_keyid = 0;
1277 	asoc->authinfo.recv_key = NULL;
1278 	asoc->authinfo.recv_keyid = 0;
1279 	LIST_INIT(&asoc->shared_keys);
1280 	asoc->marked_retrans = 0;
1281 	asoc->port = inp->sctp_ep.port;
1282 	asoc->timoinit = 0;
1283 	asoc->timodata = 0;
1284 	asoc->timosack = 0;
1285 	asoc->timoshutdown = 0;
1286 	asoc->timoheartbeat = 0;
1287 	asoc->timocookie = 0;
1288 	asoc->timoshutdownack = 0;
1289 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1290 	asoc->discontinuity_time = asoc->start_time;
1291 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1292 		asoc->abandoned_unsent[i] = 0;
1293 		asoc->abandoned_sent[i] = 0;
1294 	}
1295 	/*
1296 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1297 	 * freed later when the association is freed.}
1298 	 */
1299 	return (0);
1300 }
1301 
1302 void
1303 sctp_print_mapping_array(struct sctp_association *asoc)
1304 {
1305 	unsigned int i, limit;
1306 
1307 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1308 	    asoc->mapping_array_size,
1309 	    asoc->mapping_array_base_tsn,
1310 	    asoc->cumulative_tsn,
1311 	    asoc->highest_tsn_inside_map,
1312 	    asoc->highest_tsn_inside_nr_map);
1313 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1314 		if (asoc->mapping_array[limit - 1] != 0) {
1315 			break;
1316 		}
1317 	}
1318 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1319 	for (i = 0; i < limit; i++) {
1320 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1321 	}
1322 	if (limit % 16)
1323 		SCTP_PRINTF("\n");
1324 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1325 		if (asoc->nr_mapping_array[limit - 1]) {
1326 			break;
1327 		}
1328 	}
1329 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1330 	for (i = 0; i < limit; i++) {
1331 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1332 	}
1333 	if (limit % 16)
1334 		SCTP_PRINTF("\n");
1335 }
1336 
1337 int
1338 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1339 {
1340 	/* mapping array needs to grow */
1341 	uint8_t *new_array1, *new_array2;
1342 	uint32_t new_size;
1343 
1344 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1345 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1346 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1347 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1348 		/* can't get more, forget it */
1349 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1350 		if (new_array1) {
1351 			SCTP_FREE(new_array1, SCTP_M_MAP);
1352 		}
1353 		if (new_array2) {
1354 			SCTP_FREE(new_array2, SCTP_M_MAP);
1355 		}
1356 		return (-1);
1357 	}
1358 	memset(new_array1, 0, new_size);
1359 	memset(new_array2, 0, new_size);
1360 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1361 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1362 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1363 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1364 	asoc->mapping_array = new_array1;
1365 	asoc->nr_mapping_array = new_array2;
1366 	asoc->mapping_array_size = new_size;
1367 	return (0);
1368 }
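
/*
 * Sizing note for the expansion above: "needed" is expressed in TSNs
 * (bits), so a request for room to mark, say, 20 more TSNs adds
 * (20 + 7) / 8 == 3 bytes plus the fixed SCTP_MAPPING_ARRAY_INCR
 * headroom.  The renegable and non-renegable maps are always
 * reallocated together so that they stay the same size.
 */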
1369 
1370 
1371 static void
1372 sctp_iterator_work(struct sctp_iterator *it)
1373 {
1374 	struct epoch_tracker et;
1375 	struct sctp_inpcb *tinp;
1376 	int iteration_count = 0;
1377 	int inp_skip = 0;
1378 	int first_in = 1;
1379 
1380 	NET_EPOCH_ENTER(et);
1381 	SCTP_INP_INFO_RLOCK();
1382 	SCTP_ITERATOR_LOCK();
1383 	sctp_it_ctl.cur_it = it;
1384 	if (it->inp) {
1385 		SCTP_INP_RLOCK(it->inp);
1386 		SCTP_INP_DECR_REF(it->inp);
1387 	}
1388 	if (it->inp == NULL) {
1389 		/* iterator is complete */
1390 done_with_iterator:
1391 		sctp_it_ctl.cur_it = NULL;
1392 		SCTP_ITERATOR_UNLOCK();
1393 		SCTP_INP_INFO_RUNLOCK();
1394 		if (it->function_atend != NULL) {
1395 			(*it->function_atend) (it->pointer, it->val);
1396 		}
1397 		SCTP_FREE(it, SCTP_M_ITER);
1398 		NET_EPOCH_EXIT(et);
1399 		return;
1400 	}
1401 select_a_new_ep:
1402 	if (first_in) {
1403 		first_in = 0;
1404 	} else {
1405 		SCTP_INP_RLOCK(it->inp);
1406 	}
1407 	while (((it->pcb_flags) &&
1408 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1409 	    ((it->pcb_features) &&
1410 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1411 		/* endpoint flags or features don't match, so keep looking */
1412 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1413 			SCTP_INP_RUNLOCK(it->inp);
1414 			goto done_with_iterator;
1415 		}
1416 		tinp = it->inp;
1417 		it->inp = LIST_NEXT(it->inp, sctp_list);
1418 		SCTP_INP_RUNLOCK(tinp);
1419 		if (it->inp == NULL) {
1420 			goto done_with_iterator;
1421 		}
1422 		SCTP_INP_RLOCK(it->inp);
1423 	}
1424 	/* now go through each assoc which is in the desired state */
1425 	if (it->done_current_ep == 0) {
1426 		if (it->function_inp != NULL)
1427 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1428 		it->done_current_ep = 1;
1429 	}
1430 	if (it->stcb == NULL) {
1431 		/* run the per instance function */
1432 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1433 	}
1434 	if ((inp_skip) || it->stcb == NULL) {
1435 		if (it->function_inp_end != NULL) {
1436 			inp_skip = (*it->function_inp_end) (it->inp,
1437 			    it->pointer,
1438 			    it->val);
1439 		}
1440 		SCTP_INP_RUNLOCK(it->inp);
1441 		goto no_stcb;
1442 	}
1443 	while (it->stcb) {
1444 		SCTP_TCB_LOCK(it->stcb);
1445 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1446 			/* not in the right state... keep looking */
1447 			SCTP_TCB_UNLOCK(it->stcb);
1448 			goto next_assoc;
1449 		}
1450 		/* see if we have limited out the iterator loop */
1451 		iteration_count++;
1452 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1453 			/* Pause to let others grab the lock */
1454 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1455 			SCTP_TCB_UNLOCK(it->stcb);
1456 			SCTP_INP_INCR_REF(it->inp);
1457 			SCTP_INP_RUNLOCK(it->inp);
1458 			SCTP_ITERATOR_UNLOCK();
1459 			SCTP_INP_INFO_RUNLOCK();
1460 			SCTP_INP_INFO_RLOCK();
1461 			SCTP_ITERATOR_LOCK();
1462 			if (sctp_it_ctl.iterator_flags) {
1463 				/* We won't be staying here */
1464 				SCTP_INP_DECR_REF(it->inp);
1465 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1466 				if (sctp_it_ctl.iterator_flags &
1467 				    SCTP_ITERATOR_STOP_CUR_IT) {
1468 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1469 					goto done_with_iterator;
1470 				}
1471 				if (sctp_it_ctl.iterator_flags &
1472 				    SCTP_ITERATOR_STOP_CUR_INP) {
1473 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1474 					goto no_stcb;
1475 				}
1476 				/* If we reach here huh? */
1477 				SCTP_PRINTF("Unknown it ctl flag %x\n",
1478 				    sctp_it_ctl.iterator_flags);
1479 				sctp_it_ctl.iterator_flags = 0;
1480 			}
1481 			SCTP_INP_RLOCK(it->inp);
1482 			SCTP_INP_DECR_REF(it->inp);
1483 			SCTP_TCB_LOCK(it->stcb);
1484 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1485 			iteration_count = 0;
1486 		}
1487 
1488 		/* run function on this one */
1489 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1490 
1491 		/*
1492 		 * we lie here, it really needs to have its own type but
1493 		 * first I must verify that this won't affect things :-0
1494 		 */
1495 		if (it->no_chunk_output == 0)
1496 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1497 
1498 		SCTP_TCB_UNLOCK(it->stcb);
1499 next_assoc:
1500 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1501 		if (it->stcb == NULL) {
1502 			/* Run last function */
1503 			if (it->function_inp_end != NULL) {
1504 				inp_skip = (*it->function_inp_end) (it->inp,
1505 				    it->pointer,
1506 				    it->val);
1507 			}
1508 		}
1509 	}
1510 	SCTP_INP_RUNLOCK(it->inp);
1511 no_stcb:
1512 	/* done with all assocs on this endpoint, move on to next endpoint */
1513 	it->done_current_ep = 0;
1514 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1515 		it->inp = NULL;
1516 	} else {
1517 		it->inp = LIST_NEXT(it->inp, sctp_list);
1518 	}
1519 	if (it->inp == NULL) {
1520 		goto done_with_iterator;
1521 	}
1522 	goto select_a_new_ep;
1523 }
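
/*
 * Fairness note on the walk above: after SCTP_ITERATOR_MAX_AT_ONCE
 * associations the iterator pins the current inp and stcb with
 * reference counts, drops the TCB/INP/ITERATOR/INFO locks so that
 * other threads can make progress, reacquires them, and only then
 * honors any STOP_CUR_IT or STOP_CUR_INP request that was posted
 * while the locks were released.
 */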
1524 
1525 void
1526 sctp_iterator_worker(void)
1527 {
1528 	struct sctp_iterator *it;
1529 
1530 	/* This function is called with the WQ lock in place */
1531 	sctp_it_ctl.iterator_running = 1;
1532 	while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
1533 		/* now lets work on this one */
1534 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1535 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1536 		CURVNET_SET(it->vn);
1537 		sctp_iterator_work(it);
1538 		CURVNET_RESTORE();
1539 		SCTP_IPI_ITERATOR_WQ_LOCK();
1540 		/* sa_ignore FREED_MEMORY */
1541 	}
1542 	sctp_it_ctl.iterator_running = 0;
1543 	return;
1544 }
1545 
1546 
1547 static void
1548 sctp_handle_addr_wq(void)
1549 {
1550 	/* deal with the ADDR wq from the rtsock calls */
1551 	struct sctp_laddr *wi, *nwi;
1552 	struct sctp_asconf_iterator *asc;
1553 
1554 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1555 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1556 	if (asc == NULL) {
1557 		/* Try later, no memory */
1558 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1559 		    (struct sctp_inpcb *)NULL,
1560 		    (struct sctp_tcb *)NULL,
1561 		    (struct sctp_nets *)NULL);
1562 		return;
1563 	}
1564 	LIST_INIT(&asc->list_of_work);
1565 	asc->cnt = 0;
1566 
1567 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1568 		LIST_REMOVE(wi, sctp_nxt_addr);
1569 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1570 		asc->cnt++;
1571 	}
1572 
1573 	if (asc->cnt == 0) {
1574 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1575 	} else {
1576 		int ret;
1577 
1578 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1579 		    sctp_asconf_iterator_stcb,
1580 		    NULL,	/* No ep end for boundall */
1581 		    SCTP_PCB_FLAGS_BOUNDALL,
1582 		    SCTP_PCB_ANY_FEATURES,
1583 		    SCTP_ASOC_ANY_STATE,
1584 		    (void *)asc, 0,
1585 		    sctp_asconf_iterator_end, NULL, 0);
1586 		if (ret) {
1587 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1588 			/*
1589 			 * Free the entries if we are stopping; otherwise put
1590 			 * them back on the addr_wq.
1591 			 */
1592 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1593 				sctp_asconf_iterator_end(asc, 0);
1594 			} else {
1595 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1596 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1597 				}
1598 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1599 			}
1600 		}
1601 	}
1602 }
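
/*
 * In short, sctp_handle_addr_wq() moves every queued address change
 * onto a private work list and kicks off an asconf iterator over all
 * bound-all endpoints to apply them.  If the iterator cannot be
 * started, the entries are handed to sctp_asconf_iterator_end() when
 * the stack is shutting down, or put back on the global addr_wq so a
 * later pass can retry.
 */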
1603 
1604 void
1605 sctp_timeout_handler(void *t)
1606 {
1607 	struct epoch_tracker et;
1608 	struct sctp_inpcb *inp;
1609 	struct sctp_tcb *stcb;
1610 	struct sctp_nets *net;
1611 	struct sctp_timer *tmr;
1612 	struct mbuf *op_err;
1613 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1614 	struct socket *so;
1615 #endif
1616 	int did_output;
1617 	int type;
1618 
1619 	tmr = (struct sctp_timer *)t;
1620 	inp = (struct sctp_inpcb *)tmr->ep;
1621 	stcb = (struct sctp_tcb *)tmr->tcb;
1622 	net = (struct sctp_nets *)tmr->net;
1623 	CURVNET_SET((struct vnet *)tmr->vnet);
1624 	did_output = 1;
1625 
1626 #ifdef SCTP_AUDITING_ENABLED
1627 	sctp_audit_log(0xF0, (uint8_t)tmr->type);
1628 	sctp_auditing(3, inp, stcb, net);
1629 #endif
1630 
1631 	/* sanity checks... */
1632 	if (tmr->self != (void *)tmr) {
1633 		/*
1634 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1635 		 * (void *)tmr);
1636 		 */
1637 		CURVNET_RESTORE();
1638 		return;
1639 	}
1640 	tmr->stopped_from = 0xa001;
1641 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1642 		/*
1643 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1644 		 * tmr->type);
1645 		 */
1646 		CURVNET_RESTORE();
1647 		return;
1648 	}
1649 	tmr->stopped_from = 0xa002;
1650 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1651 		CURVNET_RESTORE();
1652 		return;
1653 	}
1654 	/* if this is an iterator timeout, get the struct and clear inp */
1655 	tmr->stopped_from = 0xa003;
1656 	if (inp) {
1657 		SCTP_INP_INCR_REF(inp);
1658 		if ((inp->sctp_socket == NULL) &&
1659 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1660 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1661 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1662 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1663 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1664 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1665 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1666 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1667 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))) {
1668 			SCTP_INP_DECR_REF(inp);
1669 			CURVNET_RESTORE();
1670 			return;
1671 		}
1672 	}
1673 	tmr->stopped_from = 0xa004;
1674 	if (stcb) {
1675 		atomic_add_int(&stcb->asoc.refcnt, 1);
1676 		if (stcb->asoc.state == 0) {
1677 			atomic_add_int(&stcb->asoc.refcnt, -1);
1678 			if (inp) {
1679 				SCTP_INP_DECR_REF(inp);
1680 			}
1681 			CURVNET_RESTORE();
1682 			return;
1683 		}
1684 	}
1685 	type = tmr->type;
1686 	tmr->stopped_from = 0xa005;
1687 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
1688 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1689 		if (inp) {
1690 			SCTP_INP_DECR_REF(inp);
1691 		}
1692 		if (stcb) {
1693 			atomic_add_int(&stcb->asoc.refcnt, -1);
1694 		}
1695 		CURVNET_RESTORE();
1696 		return;
1697 	}
1698 	tmr->stopped_from = 0xa006;
1699 
1700 	if (stcb) {
1701 		SCTP_TCB_LOCK(stcb);
1702 		atomic_add_int(&stcb->asoc.refcnt, -1);
1703 		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1704 		    ((stcb->asoc.state == 0) ||
1705 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1706 			SCTP_TCB_UNLOCK(stcb);
1707 			if (inp) {
1708 				SCTP_INP_DECR_REF(inp);
1709 			}
1710 			CURVNET_RESTORE();
1711 			return;
1712 		}
1713 	} else if (inp != NULL) {
1714 		if (type != SCTP_TIMER_TYPE_INPKILL) {
1715 			SCTP_INP_WLOCK(inp);
1716 		}
1717 	} else {
1718 		SCTP_WQ_ADDR_LOCK();
1719 	}
1720 	/* record in stopped what t-o occurred */
1721 	tmr->stopped_from = type;
1722 
1723 	NET_EPOCH_ENTER(et);
1724 	/* mark as being serviced now */
1725 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1726 		/*
1727 		 * Callout has been rescheduled.
1728 		 */
1729 		goto get_out;
1730 	}
1731 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1732 		/*
1733 		 * Not active, so no action.
1734 		 */
1735 		goto get_out;
1736 	}
1737 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1738 
1739 	/* call the handler for the appropriate timer type */
1740 	switch (type) {
1741 	case SCTP_TIMER_TYPE_ADDR_WQ:
1742 		sctp_handle_addr_wq();
1743 		break;
1744 	case SCTP_TIMER_TYPE_SEND:
1745 		if ((stcb == NULL) || (inp == NULL)) {
1746 			break;
1747 		}
1748 		SCTP_STAT_INCR(sctps_timodata);
1749 		stcb->asoc.timodata++;
1750 		stcb->asoc.num_send_timers_up--;
1751 		if (stcb->asoc.num_send_timers_up < 0) {
1752 			stcb->asoc.num_send_timers_up = 0;
1753 		}
1754 		SCTP_TCB_LOCK_ASSERT(stcb);
1755 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1756 			/* no need to unlock the tcb, it's gone */
1757 
1758 			goto out_decr;
1759 		}
1760 		SCTP_TCB_LOCK_ASSERT(stcb);
1761 #ifdef SCTP_AUDITING_ENABLED
1762 		sctp_auditing(4, inp, stcb, net);
1763 #endif
1764 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1765 		if ((stcb->asoc.num_send_timers_up == 0) &&
1766 		    (stcb->asoc.sent_queue_cnt > 0)) {
1767 			struct sctp_tmit_chunk *chk;
1768 
1769 			/*
1770 			 * Safeguard: if there is something on the sent queue
1771 			 * but no timers are running, something is wrong, so we
1772 			 * start a timer on the first chunk on the sent queue,
1773 			 * on whatever net it is sent to.
1774 			 */
1775 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1776 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1777 			    chk->whoTo);
1778 		}
1779 		break;
1780 	case SCTP_TIMER_TYPE_INIT:
1781 		if ((stcb == NULL) || (inp == NULL)) {
1782 			break;
1783 		}
1784 		SCTP_STAT_INCR(sctps_timoinit);
1785 		stcb->asoc.timoinit++;
1786 		if (sctp_t1init_timer(inp, stcb, net)) {
1787 			/* no need to unlock the tcb, it's gone */
1788 			goto out_decr;
1789 		}
1790 		/* We do output but not here */
1791 		did_output = 0;
1792 		break;
1793 	case SCTP_TIMER_TYPE_RECV:
1794 		if ((stcb == NULL) || (inp == NULL)) {
1795 			break;
1796 		}
1797 		SCTP_STAT_INCR(sctps_timosack);
1798 		stcb->asoc.timosack++;
1799 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1800 #ifdef SCTP_AUDITING_ENABLED
1801 		sctp_auditing(4, inp, stcb, net);
1802 #endif
1803 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1804 		break;
1805 	case SCTP_TIMER_TYPE_SHUTDOWN:
1806 		if ((stcb == NULL) || (inp == NULL)) {
1807 			break;
1808 		}
1809 		if (sctp_shutdown_timer(inp, stcb, net)) {
1810 			/* no need to unlock the tcb, it's gone */
1811 			goto out_decr;
1812 		}
1813 		SCTP_STAT_INCR(sctps_timoshutdown);
1814 		stcb->asoc.timoshutdown++;
1815 #ifdef SCTP_AUDITING_ENABLED
1816 		sctp_auditing(4, inp, stcb, net);
1817 #endif
1818 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1819 		break;
1820 	case SCTP_TIMER_TYPE_HEARTBEAT:
1821 		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1822 			break;
1823 		}
1824 		SCTP_STAT_INCR(sctps_timoheartbeat);
1825 		stcb->asoc.timoheartbeat++;
1826 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1827 			/* no need to unlock the tcb, it's gone */
1828 			goto out_decr;
1829 		}
1830 #ifdef SCTP_AUDITING_ENABLED
1831 		sctp_auditing(4, inp, stcb, net);
1832 #endif
1833 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1834 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1835 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1836 		}
1837 		break;
1838 	case SCTP_TIMER_TYPE_COOKIE:
1839 		if ((stcb == NULL) || (inp == NULL)) {
1840 			break;
1841 		}
1842 
1843 		if (sctp_cookie_timer(inp, stcb, net)) {
1844 			/* no need to unlock the tcb, it's gone */
1845 			goto out_decr;
1846 		}
1847 		SCTP_STAT_INCR(sctps_timocookie);
1848 		stcb->asoc.timocookie++;
1849 #ifdef SCTP_AUDITING_ENABLED
1850 		sctp_auditing(4, inp, stcb, net);
1851 #endif
1852 		/*
1853 		 * We treat the T3 and cookie timers the same with respect
1854 		 * to the "from" value passed to chunk_output.
1855 		 */
1856 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1857 		break;
1858 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1859 		{
1860 			struct timeval tv;
1861 			int i, secret;
1862 
1863 			if (inp == NULL) {
1864 				break;
1865 			}
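			/*
			 * Rotate the cookie secret: remember when the change
			 * happened, keep the previous secret around as
			 * last_secret_number so cookies signed with it can
			 * still be verified, fill the new slot with fresh
			 * random material, and re-arm the timer.
			 */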
1866 			SCTP_STAT_INCR(sctps_timosecret);
1867 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1868 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1869 			inp->sctp_ep.last_secret_number =
1870 			    inp->sctp_ep.current_secret_number;
1871 			inp->sctp_ep.current_secret_number++;
1872 			if (inp->sctp_ep.current_secret_number >=
1873 			    SCTP_HOW_MANY_SECRETS) {
1874 				inp->sctp_ep.current_secret_number = 0;
1875 			}
1876 			secret = (int)inp->sctp_ep.current_secret_number;
1877 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1878 				inp->sctp_ep.secret_key[secret][i] =
1879 				    sctp_select_initial_TSN(&inp->sctp_ep);
1880 			}
1881 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1882 		}
1883 		did_output = 0;
1884 		break;
1885 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1886 		if ((stcb == NULL) || (inp == NULL)) {
1887 			break;
1888 		}
1889 		SCTP_STAT_INCR(sctps_timopathmtu);
1890 		sctp_pathmtu_timer(inp, stcb, net);
1891 		did_output = 0;
1892 		break;
1893 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1894 		if ((stcb == NULL) || (inp == NULL)) {
1895 			break;
1896 		}
1897 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1898 			/* no need to unlock the tcb, it's gone */
1899 			goto out_decr;
1900 		}
1901 		SCTP_STAT_INCR(sctps_timoshutdownack);
1902 		stcb->asoc.timoshutdownack++;
1903 #ifdef SCTP_AUDITING_ENABLED
1904 		sctp_auditing(4, inp, stcb, net);
1905 #endif
1906 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1907 		break;
1908 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1909 		if ((stcb == NULL) || (inp == NULL)) {
1910 			break;
1911 		}
1912 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1913 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1914 		    "Shutdown guard timer expired");
1915 		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
1916 		/* no need to unlock the tcb, it's gone */
1917 		goto out_decr;
1918 	case SCTP_TIMER_TYPE_STRRESET:
1919 		if ((stcb == NULL) || (inp == NULL)) {
1920 			break;
1921 		}
1922 		if (sctp_strreset_timer(inp, stcb, net)) {
1923 			/* no need to unlock the tcb, it's gone */
1924 			goto out_decr;
1925 		}
1926 		SCTP_STAT_INCR(sctps_timostrmrst);
1927 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1928 		break;
1929 	case SCTP_TIMER_TYPE_ASCONF:
1930 		if ((stcb == NULL) || (inp == NULL)) {
1931 			break;
1932 		}
1933 		if (sctp_asconf_timer(inp, stcb, net)) {
1934 			/* no need to unlock the tcb, it's gone */
1935 			goto out_decr;
1936 		}
1937 		SCTP_STAT_INCR(sctps_timoasconf);
1938 #ifdef SCTP_AUDITING_ENABLED
1939 		sctp_auditing(4, inp, stcb, net);
1940 #endif
1941 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1942 		break;
1943 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1944 		if ((stcb == NULL) || (inp == NULL)) {
1945 			break;
1946 		}
1947 		sctp_delete_prim_timer(inp, stcb, net);
1948 		SCTP_STAT_INCR(sctps_timodelprim);
1949 		break;
1950 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1951 		if ((stcb == NULL) || (inp == NULL)) {
1952 			break;
1953 		}
1954 		SCTP_STAT_INCR(sctps_timoautoclose);
1955 		sctp_autoclose_timer(inp, stcb, net);
1956 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1957 		did_output = 0;
1958 		break;
1959 	case SCTP_TIMER_TYPE_ASOCKILL:
1960 		if ((stcb == NULL) || (inp == NULL)) {
1961 			break;
1962 		}
1963 		SCTP_STAT_INCR(sctps_timoassockill);
1964 		/* Can we free it yet? */
1965 		SCTP_INP_DECR_REF(inp);
1966 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
1967 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1968 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1969 		so = SCTP_INP_SO(inp);
1970 		atomic_add_int(&stcb->asoc.refcnt, 1);
1971 		SCTP_TCB_UNLOCK(stcb);
1972 		SCTP_SOCKET_LOCK(so, 1);
1973 		SCTP_TCB_LOCK(stcb);
1974 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1975 #endif
1976 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1977 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1978 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1979 		SCTP_SOCKET_UNLOCK(so, 1);
1980 #endif
1981 		/*
1982 		 * sctp_free_assoc() always unlocks (or destroys) the tcb, so
1983 		 * prevent a duplicate unlock or an unlock of a freed mtx :-0
1984 		 */
1985 		stcb = NULL;
1986 		goto out_no_decr;
1987 	case SCTP_TIMER_TYPE_INPKILL:
1988 		SCTP_STAT_INCR(sctps_timoinpkill);
1989 		if (inp == NULL) {
1990 			break;
1991 		}
1992 		/*
1993 		 * special case, take away our increment since WE are the
1994 		 * killer
1995 		 */
1996 		SCTP_INP_DECR_REF(inp);
1997 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
1998 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1999 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
2000 		    SCTP_CALLED_FROM_INPKILL_TIMER);
2001 		inp = NULL;
2002 		goto out_no_decr;
2003 	default:
2004 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
2005 		    type);
2006 		break;
2007 	}
2008 #ifdef SCTP_AUDITING_ENABLED
2009 	sctp_audit_log(0xF1, (uint8_t)type);
2010 	if (inp)
2011 		sctp_auditing(5, inp, stcb, net);
2012 #endif
2013 	if ((did_output) && stcb) {
2014 		/*
2015 		 * Now we need to clean up the control chunk chain if an
2016 		 * ECNE is on it. It must be marked as UNSENT again so the
2017 		 * next call will continue to send it until we get a CWR to
2018 		 * remove it. It is, however, unlikely that we will find an
2019 		 * ECN echo on the chain.
2020 		 */
2021 		sctp_fix_ecn_echo(&stcb->asoc);
2022 	}
2023 get_out:
2024 	if (stcb) {
2025 		SCTP_TCB_UNLOCK(stcb);
2026 	} else if (inp != NULL) {
2027 		SCTP_INP_WUNLOCK(inp);
2028 	} else {
2029 		SCTP_WQ_ADDR_UNLOCK();
2030 	}
2031 
2032 out_decr:
2033 	if (inp) {
2034 		SCTP_INP_DECR_REF(inp);
2035 	}
2036 
2037 out_no_decr:
2038 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
2039 	CURVNET_RESTORE();
2040 	NET_EPOCH_EXIT(et);
2041 }
2042 
2043 void
2044 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2045     struct sctp_nets *net)
2046 {
2047 	uint32_t to_ticks;
2048 	struct sctp_timer *tmr;
2049 
2050 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
2051 		return;
2052 
2053 	tmr = NULL;
2054 	if (stcb) {
2055 		SCTP_TCB_LOCK_ASSERT(stcb);
2056 	}
2057 	/* Don't restart timer on net that's been removed. */
2058 	if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) {
2059 		return;
2060 	}
2061 	switch (t_type) {
2062 	case SCTP_TIMER_TYPE_ADDR_WQ:
2063 		/* Only 1 tick away :-) */
2064 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2065 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
2066 		break;
2067 	case SCTP_TIMER_TYPE_SEND:
2068 		/* Here we use the RTO timer */
2069 		{
2070 			int rto_val;
2071 
2072 			if ((stcb == NULL) || (net == NULL)) {
2073 				return;
2074 			}
2075 			tmr = &net->rxt_timer;
2076 			if (net->RTO == 0) {
2077 				rto_val = stcb->asoc.initial_rto;
2078 			} else {
2079 				rto_val = net->RTO;
2080 			}
2081 			to_ticks = MSEC_TO_TICKS(rto_val);
2082 		}
2083 		break;
2084 	case SCTP_TIMER_TYPE_INIT:
2085 		/*
2086 		 * Here we use the INIT timer default usually about 1
2087 		 * minute.
2088 		 */
2089 		if ((stcb == NULL) || (net == NULL)) {
2090 			return;
2091 		}
2092 		tmr = &net->rxt_timer;
2093 		if (net->RTO == 0) {
2094 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2095 		} else {
2096 			to_ticks = MSEC_TO_TICKS(net->RTO);
2097 		}
2098 		break;
2099 	case SCTP_TIMER_TYPE_RECV:
2100 		/*
2101 		 * Here we use the delayed-ACK timer value from the inp,
2102 		 * usually about 200 ms.
2103 		 */
2104 		if (stcb == NULL) {
2105 			return;
2106 		}
2107 		tmr = &stcb->asoc.dack_timer;
2108 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2109 		break;
2110 	case SCTP_TIMER_TYPE_SHUTDOWN:
2111 		/* Here we use the RTO of the destination. */
2112 		if ((stcb == NULL) || (net == NULL)) {
2113 			return;
2114 		}
2115 		if (net->RTO == 0) {
2116 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2117 		} else {
2118 			to_ticks = MSEC_TO_TICKS(net->RTO);
2119 		}
2120 		tmr = &net->rxt_timer;
2121 		break;
2122 	case SCTP_TIMER_TYPE_HEARTBEAT:
2123 		/*
2124 		 * The net is used here so that we can add in the RTO, even
2125 		 * though we use a different timer. We also add the HB delay
2126 		 * plus a random jitter.
2127 		 */
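		/*
		 * A rough sketch of the math below: with jitter uniform in
		 * [0, RTO), the adjusted delay ends up roughly uniform in
		 * [RTO/2, 3*RTO/2), to which heart_beat_delay is added for
		 * confirmed, non-PF destinations before converting ms to
		 * ticks.
		 */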
2128 		if ((stcb == NULL) || (net == NULL)) {
2129 			return;
2130 		} else {
2131 			uint32_t rndval;
2132 			uint32_t jitter;
2133 
2134 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2135 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2136 				return;
2137 			}
2138 			if (net->RTO == 0) {
2139 				to_ticks = stcb->asoc.initial_rto;
2140 			} else {
2141 				to_ticks = net->RTO;
2142 			}
2143 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2144 			jitter = rndval % to_ticks;
2145 			if (jitter >= (to_ticks >> 1)) {
2146 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2147 			} else {
2148 				to_ticks = to_ticks - jitter;
2149 			}
2150 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2151 			    !(net->dest_state & SCTP_ADDR_PF)) {
2152 				to_ticks += net->heart_beat_delay;
2153 			}
2154 			/*
2155 			 * Now we must convert to_ticks, which is currently
2156 			 * in ms, to ticks.
2157 			 */
2158 			to_ticks = MSEC_TO_TICKS(to_ticks);
2159 			tmr = &net->hb_timer;
2160 		}
2161 		break;
2162 	case SCTP_TIMER_TYPE_COOKIE:
2163 		/*
2164 		 * Here we can use the RTO timer from the network since one
2165 		 * RTT was completed. If a retransmission happened, we will be
2166 		 * using the RTO initial value.
2167 		 */
2168 		if ((stcb == NULL) || (net == NULL)) {
2169 			return;
2170 		}
2171 		if (net->RTO == 0) {
2172 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2173 		} else {
2174 			to_ticks = MSEC_TO_TICKS(net->RTO);
2175 		}
2176 		tmr = &net->rxt_timer;
2177 		break;
2178 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2179 		/*
2180 		 * Nothing needed but the endpoint here, usually about 60
2181 		 * minutes.
2182 		 */
2183 		tmr = &inp->sctp_ep.signature_change;
2184 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2185 		break;
2186 	case SCTP_TIMER_TYPE_ASOCKILL:
2187 		if (stcb == NULL) {
2188 			return;
2189 		}
2190 		tmr = &stcb->asoc.strreset_timer;
2191 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2192 		break;
2193 	case SCTP_TIMER_TYPE_INPKILL:
2194 		/*
2195 		 * The inp is set up to die. We re-use the signature_change
2196 		 * timer since that has stopped and we are in the GONE
2197 		 * state.
2198 		 */
2199 		tmr = &inp->sctp_ep.signature_change;
2200 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2201 		break;
2202 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2203 		/*
2204 		 * Here we use the value found in the EP for PMTU, usually
2205 		 * about 10 minutes.
2206 		 */
2207 		if ((stcb == NULL) || (net == NULL)) {
2208 			return;
2209 		}
2210 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2211 			return;
2212 		}
2213 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2214 		tmr = &net->pmtu_timer;
2215 		break;
2216 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2217 		/* Here we use the RTO of the destination */
2218 		if ((stcb == NULL) || (net == NULL)) {
2219 			return;
2220 		}
2221 		if (net->RTO == 0) {
2222 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2223 		} else {
2224 			to_ticks = MSEC_TO_TICKS(net->RTO);
2225 		}
2226 		tmr = &net->rxt_timer;
2227 		break;
2228 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2229 		/*
2230 		 * Here we use the endpoint's shutdown guard timer, usually
2231 		 * about 3 minutes.
2232 		 */
2233 		if (stcb == NULL) {
2234 			return;
2235 		}
2236 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2237 			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2238 		} else {
2239 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2240 		}
2241 		tmr = &stcb->asoc.shut_guard_timer;
2242 		break;
2243 	case SCTP_TIMER_TYPE_STRRESET:
2244 		/*
2245 		 * Here the timer comes from the stcb but its value is from
2246 		 * the net's RTO.
2247 		 */
2248 		if ((stcb == NULL) || (net == NULL)) {
2249 			return;
2250 		}
2251 		if (net->RTO == 0) {
2252 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2253 		} else {
2254 			to_ticks = MSEC_TO_TICKS(net->RTO);
2255 		}
2256 		tmr = &stcb->asoc.strreset_timer;
2257 		break;
2258 	case SCTP_TIMER_TYPE_ASCONF:
2259 		/*
2260 		 * Here the timer comes from the stcb but its value is from
2261 		 * the net's RTO.
2262 		 */
2263 		if ((stcb == NULL) || (net == NULL)) {
2264 			return;
2265 		}
2266 		if (net->RTO == 0) {
2267 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2268 		} else {
2269 			to_ticks = MSEC_TO_TICKS(net->RTO);
2270 		}
2271 		tmr = &stcb->asoc.asconf_timer;
2272 		break;
2273 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2274 		if ((stcb == NULL) || (net != NULL)) {
2275 			return;
2276 		}
2277 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2278 		tmr = &stcb->asoc.delete_prim_timer;
2279 		break;
2280 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2281 		if (stcb == NULL) {
2282 			return;
2283 		}
2284 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2285 			/*
2286 			 * Really an error since stcb is NOT set to
2287 			 * autoclose
2288 			 */
2289 			return;
2290 		}
2291 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2292 		tmr = &stcb->asoc.autoclose_timer;
2293 		break;
2294 	default:
2295 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2296 		    __func__, t_type);
2297 		return;
2298 		break;
2299 	}
2300 	if ((to_ticks <= 0) || (tmr == NULL)) {
2301 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2302 		    __func__, t_type, to_ticks, (void *)tmr);
2303 		return;
2304 	}
2305 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2306 		/*
2307 		 * We do NOT allow the timer to already be running; if it is,
2308 		 * we leave the current one up unchanged.
2309 		 */
2310 		return;
2311 	}
2312 	/* At this point we can proceed */
2313 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2314 		stcb->asoc.num_send_timers_up++;
2315 	}
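	/*
	 * Record the owners of this timer; tmr->self doubles as the "armed"
	 * marker that sctp_timeout_handler() checks to reject stale callouts,
	 * and it is cleared again by sctp_timer_stop().
	 */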
2316 	tmr->stopped_from = 0;
2317 	tmr->type = t_type;
2318 	tmr->ep = (void *)inp;
2319 	tmr->tcb = (void *)stcb;
2320 	tmr->net = (void *)net;
2321 	tmr->self = (void *)tmr;
2322 	tmr->vnet = (void *)curvnet;
2323 	tmr->ticks = sctp_get_tick_count();
2324 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2325 	return;
2326 }
2327 
2328 void
2329 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2330     struct sctp_nets *net, uint32_t from)
2331 {
2332 	struct sctp_timer *tmr;
2333 
2334 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2335 	    (inp == NULL))
2336 		return;
2337 
2338 	tmr = NULL;
2339 	if (stcb) {
2340 		SCTP_TCB_LOCK_ASSERT(stcb);
2341 	}
2342 	switch (t_type) {
2343 	case SCTP_TIMER_TYPE_ADDR_WQ:
2344 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2345 		break;
2346 	case SCTP_TIMER_TYPE_SEND:
2347 		if ((stcb == NULL) || (net == NULL)) {
2348 			return;
2349 		}
2350 		tmr = &net->rxt_timer;
2351 		break;
2352 	case SCTP_TIMER_TYPE_INIT:
2353 		if ((stcb == NULL) || (net == NULL)) {
2354 			return;
2355 		}
2356 		tmr = &net->rxt_timer;
2357 		break;
2358 	case SCTP_TIMER_TYPE_RECV:
2359 		if (stcb == NULL) {
2360 			return;
2361 		}
2362 		tmr = &stcb->asoc.dack_timer;
2363 		break;
2364 	case SCTP_TIMER_TYPE_SHUTDOWN:
2365 		if ((stcb == NULL) || (net == NULL)) {
2366 			return;
2367 		}
2368 		tmr = &net->rxt_timer;
2369 		break;
2370 	case SCTP_TIMER_TYPE_HEARTBEAT:
2371 		if ((stcb == NULL) || (net == NULL)) {
2372 			return;
2373 		}
2374 		tmr = &net->hb_timer;
2375 		break;
2376 	case SCTP_TIMER_TYPE_COOKIE:
2377 		if ((stcb == NULL) || (net == NULL)) {
2378 			return;
2379 		}
2380 		tmr = &net->rxt_timer;
2381 		break;
2382 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2383 		/* nothing needed but the endpoint here */
2384 		tmr = &inp->sctp_ep.signature_change;
2385 		/*
2386 		 * We re-use the newcookie timer for the INP kill timer. We
2387 		 * must ensure that we do not kill it by accident.
2388 		 */
2389 		break;
2390 	case SCTP_TIMER_TYPE_ASOCKILL:
2391 		/*
2392 		 * Stop the asoc kill timer.
2393 		 */
2394 		if (stcb == NULL) {
2395 			return;
2396 		}
2397 		tmr = &stcb->asoc.strreset_timer;
2398 		break;
2399 
2400 	case SCTP_TIMER_TYPE_INPKILL:
2401 		/*
2402 		 * The inp is set up to die. We re-use the signature_change
2403 		 * timer since that has stopped and we are in the GONE
2404 		 * state.
2405 		 */
2406 		tmr = &inp->sctp_ep.signature_change;
2407 		break;
2408 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2409 		if ((stcb == NULL) || (net == NULL)) {
2410 			return;
2411 		}
2412 		tmr = &net->pmtu_timer;
2413 		break;
2414 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2415 		if ((stcb == NULL) || (net == NULL)) {
2416 			return;
2417 		}
2418 		tmr = &net->rxt_timer;
2419 		break;
2420 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2421 		if (stcb == NULL) {
2422 			return;
2423 		}
2424 		tmr = &stcb->asoc.shut_guard_timer;
2425 		break;
2426 	case SCTP_TIMER_TYPE_STRRESET:
2427 		if (stcb == NULL) {
2428 			return;
2429 		}
2430 		tmr = &stcb->asoc.strreset_timer;
2431 		break;
2432 	case SCTP_TIMER_TYPE_ASCONF:
2433 		if (stcb == NULL) {
2434 			return;
2435 		}
2436 		tmr = &stcb->asoc.asconf_timer;
2437 		break;
2438 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2439 		if (stcb == NULL) {
2440 			return;
2441 		}
2442 		tmr = &stcb->asoc.delete_prim_timer;
2443 		break;
2444 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2445 		if (stcb == NULL) {
2446 			return;
2447 		}
2448 		tmr = &stcb->asoc.autoclose_timer;
2449 		break;
2450 	default:
2451 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2452 		    __func__, t_type);
2453 		break;
2454 	}
2455 	if (tmr == NULL) {
2456 		return;
2457 	}
2458 	if ((tmr->type != t_type) && tmr->type) {
2459 		/*
2460 		 * OK, we have a timer that is under joint use, perhaps the
2461 		 * cookie timer sharing storage with the SEND timer. We are
2462 		 * therefore NOT running the timer that the caller wants
2463 		 * stopped, so just return.
2464 		 */
2465 		return;
2466 	}
2467 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2468 		stcb->asoc.num_send_timers_up--;
2469 		if (stcb->asoc.num_send_timers_up < 0) {
2470 			stcb->asoc.num_send_timers_up = 0;
2471 		}
2472 	}
2473 	tmr->self = NULL;
2474 	tmr->stopped_from = from;
2475 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2476 	return;
2477 }
2478 
2479 uint32_t
2480 sctp_calculate_len(struct mbuf *m)
2481 {
2482 	uint32_t tlen = 0;
2483 	struct mbuf *at;
2484 
2485 	at = m;
2486 	while (at) {
2487 		tlen += SCTP_BUF_LEN(at);
2488 		at = SCTP_BUF_NEXT(at);
2489 	}
2490 	return (tlen);
2491 }
2492 
2493 void
2494 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2495     struct sctp_association *asoc, uint32_t mtu)
2496 {
2497 	/*
2498 	 * Reset the P-MTU size on this association. This involves changing
2499 	 * the asoc MTU and going through ANY chunk+overhead larger than mtu
2500 	 * to allow the DF flag to be cleared.
2501 	 */
2502 	struct sctp_tmit_chunk *chk;
2503 	unsigned int eff_mtu, ovh;
2504 
2505 	asoc->smallest_mtu = mtu;
2506 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2507 		ovh = SCTP_MIN_OVERHEAD;
2508 	} else {
2509 		ovh = SCTP_MIN_V4_OVERHEAD;
2510 	}
2511 	eff_mtu = mtu - ovh;
2512 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2513 		if (chk->send_size > eff_mtu) {
2514 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2515 		}
2516 	}
2517 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2518 		if (chk->send_size > eff_mtu) {
2519 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2520 		}
2521 	}
2522 }
2523 
2524 
2525 /*
2526  * Given an association and starting time of the current RTT period, update
2527  * RTO in number of msecs. net should point to the current network.
2528  * Return 1 if an RTO update was performed; return 0 if no update was
2529  * performed due to an invalid starting point.
2530  */
2531 
2532 int
2533 sctp_calculate_rto(struct sctp_tcb *stcb,
2534     struct sctp_association *asoc,
2535     struct sctp_nets *net,
2536     struct timeval *old,
2537     int rtt_from_sack)
2538 {
2539 	struct timeval now;
2540 	uint64_t rtt_us;	/* RTT in us */
2541 	int32_t rtt;		/* RTT in ms */
2542 	uint32_t new_rto;
2543 	int first_measure = 0;
2544 
2545 	/************************/
2546 	/* 1. calculate new RTT */
2547 	/************************/
2548 	/* get the current time */
2549 	if (stcb->asoc.use_precise_time) {
2550 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2551 	} else {
2552 		(void)SCTP_GETTIME_TIMEVAL(&now);
2553 	}
2554 	if ((old->tv_sec > now.tv_sec) ||
2555 	    ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) {
2556 		/* The starting point is in the future. */
2557 		return (0);
2558 	}
2559 	timevalsub(&now, old);
2560 	rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec;
2561 	if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) {
2562 		/* The RTT is larger than a sane value. */
2563 		return (0);
2564 	}
2565 	/* store the current RTT in us */
2566 	net->rtt = rtt_us;
2567 	/* compute rtt in ms */
2568 	rtt = (int32_t)(net->rtt / 1000);
2569 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2570 		/*
2571 		 * Tell the CC module that a new update has just occurred
2572 		 * from a sack
2573 		 */
2574 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2575 	}
2576 	/*
2577 	 * Do we need to determine the LAN type? We do this only on SACKs,
2578 	 * i.e. RTT determined from data, not from non-data (HB/INIT->INITACK).
2579 	 */
2580 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2581 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2582 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2583 			net->lan_type = SCTP_LAN_INTERNET;
2584 		} else {
2585 			net->lan_type = SCTP_LAN_LOCAL;
2586 		}
2587 	}
2588 
2589 	/***************************/
2590 	/* 2. update RTTVAR & SRTT */
2591 	/***************************/
2592 	/*-
2593 	 * Compute the scaled average lastsa and the
2594 	 * scaled variance lastsv as described in van Jacobson
2595 	 * Paper "Congestion Avoidance and Control", Annex A.
2596 	 *
2597 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2598 	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
2599 	 */
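	/*
	 * As a worked sketch (assuming the usual shift values of
	 * SCTP_RTT_SHIFT == 3 and SCTP_RTT_VAR_SHIFT == 2 from
	 * sctp_constants.h): srtt += (rtt - srtt) / 8 and
	 * rttvar += (|rtt - srtt| - rttvar) / 4, kept in scaled form so that
	 * new_rto below works out to srtt + 4 * rttvar.
	 */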
2600 	if (net->RTO_measured) {
2601 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2602 		net->lastsa += rtt;
2603 		if (rtt < 0) {
2604 			rtt = -rtt;
2605 		}
2606 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2607 		net->lastsv += rtt;
2608 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2609 			rto_logging(net, SCTP_LOG_RTTVAR);
2610 		}
2611 	} else {
2612 		/* First RTO measurement */
2613 		net->RTO_measured = 1;
2614 		first_measure = 1;
2615 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2616 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2617 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2618 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2619 		}
2620 	}
2621 	if (net->lastsv == 0) {
2622 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2623 	}
2624 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2625 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2626 	    (stcb->asoc.sat_network_lockout == 0)) {
2627 		stcb->asoc.sat_network = 1;
2628 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2629 		stcb->asoc.sat_network = 0;
2630 		stcb->asoc.sat_network_lockout = 1;
2631 	}
2632 	/* bound it, per C6/C7 in Section 5.3.1 */
2633 	if (new_rto < stcb->asoc.minrto) {
2634 		new_rto = stcb->asoc.minrto;
2635 	}
2636 	if (new_rto > stcb->asoc.maxrto) {
2637 		new_rto = stcb->asoc.maxrto;
2638 	}
2639 	net->RTO = new_rto;
2640 	return (1);
2641 }
2642 
2643 /*
2644  * Return a pointer to a contiguous piece of data from the given mbuf chain
2645  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2646  * one mbuf, a copy is made into 'in_ptr'; the caller must ensure that buffer
2647  * is >= 'len' bytes.  Returns NULL if there aren't 'len' bytes in the chain.
2648  */
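/*
 * Typical usage (sketch): pull a header that may span mbufs into a local
 * buffer and fail out on a NULL return, for example
 *
 *	struct sctp_chunkhdr buf, *ch;
 *
 *	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
 *	    sizeof(buf), (uint8_t *)&buf);
 *
 * sctp_get_next_param() below wraps exactly this pattern for parameter
 * headers.
 */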
2649 caddr_t
2650 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
2651 {
2652 	uint32_t count;
2653 	uint8_t *ptr;
2654 
2655 	ptr = in_ptr;
2656 	if ((off < 0) || (len <= 0))
2657 		return (NULL);
2658 
2659 	/* find the desired start location */
2660 	while ((m != NULL) && (off > 0)) {
2661 		if (off < SCTP_BUF_LEN(m))
2662 			break;
2663 		off -= SCTP_BUF_LEN(m);
2664 		m = SCTP_BUF_NEXT(m);
2665 	}
2666 	if (m == NULL)
2667 		return (NULL);
2668 
2669 	/* is the current mbuf large enough (i.e., is the data contiguous)? */
2670 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2671 		return (mtod(m, caddr_t)+off);
2672 	} else {
2673 		/* else, it spans more than one mbuf, so save a temp copy... */
2674 		while ((m != NULL) && (len > 0)) {
2675 			count = min(SCTP_BUF_LEN(m) - off, len);
2676 			memcpy(ptr, mtod(m, caddr_t)+off, count);
2677 			len -= count;
2678 			ptr += count;
2679 			off = 0;
2680 			m = SCTP_BUF_NEXT(m);
2681 		}
2682 		if ((m == NULL) && (len > 0))
2683 			return (NULL);
2684 		else
2685 			return ((caddr_t)in_ptr);
2686 	}
2687 }
2688 
2689 
2690 
2691 struct sctp_paramhdr *
2692 sctp_get_next_param(struct mbuf *m,
2693     int offset,
2694     struct sctp_paramhdr *pull,
2695     int pull_limit)
2696 {
2697 	/* This just provides a typed signature to Peter's Pull routine */
2698 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2699 	    (uint8_t *)pull));
2700 }
2701 
2702 
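/*
 * Append 'padlen' (at most 3) bytes of zero padding to the end of 'm',
 * using the trailing space in 'm' if available and otherwise appending a
 * fresh mbuf to the chain.  Returns the mbuf holding the padding, or NULL
 * on failure.
 */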
2703 struct mbuf *
2704 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2705 {
2706 	struct mbuf *m_last;
2707 	caddr_t dp;
2708 
2709 	if (padlen > 3) {
2710 		return (NULL);
2711 	}
2712 	if (padlen <= M_TRAILINGSPACE(m)) {
2713 		/*
2714 		 * The easy way. We hope the majority of the time we hit
2715 		 * here :)
2716 		 */
2717 		m_last = m;
2718 	} else {
2719 		/* Hard way: we must grow the mbuf chain */
2720 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2721 		if (m_last == NULL) {
2722 			return (NULL);
2723 		}
2724 		SCTP_BUF_LEN(m_last) = 0;
2725 		SCTP_BUF_NEXT(m_last) = NULL;
2726 		SCTP_BUF_NEXT(m) = m_last;
2727 	}
2728 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2729 	SCTP_BUF_LEN(m_last) += padlen;
2730 	memset(dp, 0, padlen);
2731 	return (m_last);
2732 }
2733 
2734 struct mbuf *
2735 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2736 {
2737 	/* find the last mbuf in chain and pad it */
2738 	struct mbuf *m_at;
2739 
2740 	if (last_mbuf != NULL) {
2741 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2742 	} else {
2743 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2744 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2745 				return (sctp_add_pad_tombuf(m_at, padval));
2746 			}
2747 		}
2748 	}
2749 	return (NULL);
2750 }
2751 
2752 static void
2753 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2754     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2755 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2756     SCTP_UNUSED
2757 #endif
2758 )
2759 {
2760 	struct mbuf *m_notify;
2761 	struct sctp_assoc_change *sac;
2762 	struct sctp_queued_to_read *control;
2763 	unsigned int notif_len;
2764 	uint16_t abort_len;
2765 	unsigned int i;
2766 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2767 	struct socket *so;
2768 #endif
2769 
2770 	if (stcb == NULL) {
2771 		return;
2772 	}
2773 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2774 		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2775 		if (abort != NULL) {
2776 			abort_len = ntohs(abort->ch.chunk_length);
2777 			/*
2778 			 * Only SCTP_CHUNK_BUFFER_SIZE bytes are guaranteed to be
2779 			 * contiguous.
2780 			 */
2781 			if (abort_len > SCTP_CHUNK_BUFFER_SIZE) {
2782 				abort_len = SCTP_CHUNK_BUFFER_SIZE;
2783 			}
2784 		} else {
2785 			abort_len = 0;
2786 		}
2787 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2788 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2789 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2790 			notif_len += abort_len;
2791 		}
2792 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2793 		if (m_notify == NULL) {
2794 			/* Retry with smaller value. */
2795 			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2796 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2797 			if (m_notify == NULL) {
2798 				goto set_error;
2799 			}
2800 		}
2801 		SCTP_BUF_NEXT(m_notify) = NULL;
2802 		sac = mtod(m_notify, struct sctp_assoc_change *);
2803 		memset(sac, 0, notif_len);
2804 		sac->sac_type = SCTP_ASSOC_CHANGE;
2805 		sac->sac_flags = 0;
2806 		sac->sac_length = sizeof(struct sctp_assoc_change);
2807 		sac->sac_state = state;
2808 		sac->sac_error = error;
2809 		/* XXX verify these stream counts */
2810 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2811 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2812 		sac->sac_assoc_id = sctp_get_associd(stcb);
2813 		if (notif_len > sizeof(struct sctp_assoc_change)) {
2814 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2815 				i = 0;
2816 				if (stcb->asoc.prsctp_supported == 1) {
2817 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2818 				}
2819 				if (stcb->asoc.auth_supported == 1) {
2820 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2821 				}
2822 				if (stcb->asoc.asconf_supported == 1) {
2823 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2824 				}
2825 				if (stcb->asoc.idata_supported == 1) {
2826 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
2827 				}
2828 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2829 				if (stcb->asoc.reconfig_supported == 1) {
2830 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2831 				}
2832 				sac->sac_length += i;
2833 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2834 				memcpy(sac->sac_info, abort, abort_len);
2835 				sac->sac_length += abort_len;
2836 			}
2837 		}
2838 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2839 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2840 		    0, 0, stcb->asoc.context, 0, 0, 0,
2841 		    m_notify);
2842 		if (control != NULL) {
2843 			control->length = SCTP_BUF_LEN(m_notify);
2844 			control->spec_flags = M_NOTIFICATION;
2845 			/* not that we need this */
2846 			control->tail_mbuf = m_notify;
2847 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2848 			    control,
2849 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2850 			    so_locked);
2851 		} else {
2852 			sctp_m_freem(m_notify);
2853 		}
2854 	}
2855 	/*
2856 	 * For 1-to-1 style sockets, we send up an error when an ABORT
2857 	 * comes in.
2858 	 */
2859 set_error:
2860 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2861 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2862 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2863 		SOCK_LOCK(stcb->sctp_socket);
2864 		if (from_peer) {
2865 			if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
2866 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2867 				stcb->sctp_socket->so_error = ECONNREFUSED;
2868 			} else {
2869 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2870 				stcb->sctp_socket->so_error = ECONNRESET;
2871 			}
2872 		} else {
2873 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
2874 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
2875 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
2876 				stcb->sctp_socket->so_error = ETIMEDOUT;
2877 			} else {
2878 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2879 				stcb->sctp_socket->so_error = ECONNABORTED;
2880 			}
2881 		}
2882 		SOCK_UNLOCK(stcb->sctp_socket);
2883 	}
2884 	/* Wake ANY sleepers */
2885 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2886 	so = SCTP_INP_SO(stcb->sctp_ep);
2887 	if (!so_locked) {
2888 		atomic_add_int(&stcb->asoc.refcnt, 1);
2889 		SCTP_TCB_UNLOCK(stcb);
2890 		SCTP_SOCKET_LOCK(so, 1);
2891 		SCTP_TCB_LOCK(stcb);
2892 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2893 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2894 			SCTP_SOCKET_UNLOCK(so, 1);
2895 			return;
2896 		}
2897 	}
2898 #endif
2899 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2900 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2901 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2902 		socantrcvmore(stcb->sctp_socket);
2903 	}
2904 	sorwakeup(stcb->sctp_socket);
2905 	sowwakeup(stcb->sctp_socket);
2906 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2907 	if (!so_locked) {
2908 		SCTP_SOCKET_UNLOCK(so, 1);
2909 	}
2910 #endif
2911 }
2912 
2913 static void
2914 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2915     struct sockaddr *sa, uint32_t error, int so_locked
2916 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2917     SCTP_UNUSED
2918 #endif
2919 )
2920 {
2921 	struct mbuf *m_notify;
2922 	struct sctp_paddr_change *spc;
2923 	struct sctp_queued_to_read *control;
2924 
2925 	if ((stcb == NULL) ||
2926 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2927 		/* event not enabled */
2928 		return;
2929 	}
2930 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2931 	if (m_notify == NULL)
2932 		return;
2933 	SCTP_BUF_LEN(m_notify) = 0;
2934 	spc = mtod(m_notify, struct sctp_paddr_change *);
2935 	memset(spc, 0, sizeof(struct sctp_paddr_change));
2936 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2937 	spc->spc_flags = 0;
2938 	spc->spc_length = sizeof(struct sctp_paddr_change);
2939 	switch (sa->sa_family) {
2940 #ifdef INET
2941 	case AF_INET:
2942 #ifdef INET6
2943 		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
2944 			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
2945 			    (struct sockaddr_in6 *)&spc->spc_aaddr);
2946 		} else {
2947 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2948 		}
2949 #else
2950 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2951 #endif
2952 		break;
2953 #endif
2954 #ifdef INET6
2955 	case AF_INET6:
2956 		{
2957 			struct sockaddr_in6 *sin6;
2958 
2959 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2960 
2961 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2962 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2963 				if (sin6->sin6_scope_id == 0) {
2964 					/* recover scope_id for user */
2965 					(void)sa6_recoverscope(sin6);
2966 				} else {
2967 					/* clear embedded scope_id for user */
2968 					in6_clearscope(&sin6->sin6_addr);
2969 				}
2970 			}
2971 			break;
2972 		}
2973 #endif
2974 	default:
2975 		/* TSNH */
2976 		break;
2977 	}
2978 	spc->spc_state = state;
2979 	spc->spc_error = error;
2980 	spc->spc_assoc_id = sctp_get_associd(stcb);
2981 
2982 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2983 	SCTP_BUF_NEXT(m_notify) = NULL;
2984 
2985 	/* append to socket */
2986 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2987 	    0, 0, stcb->asoc.context, 0, 0, 0,
2988 	    m_notify);
2989 	if (control == NULL) {
2990 		/* no memory */
2991 		sctp_m_freem(m_notify);
2992 		return;
2993 	}
2994 	control->length = SCTP_BUF_LEN(m_notify);
2995 	control->spec_flags = M_NOTIFICATION;
2996 	/* not that we need this */
2997 	control->tail_mbuf = m_notify;
2998 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2999 	    control,
3000 	    &stcb->sctp_socket->so_rcv, 1,
3001 	    SCTP_READ_LOCK_NOT_HELD,
3002 	    so_locked);
3003 }
3004 
3005 
3006 static void
3007 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
3008     struct sctp_tmit_chunk *chk, int so_locked
3009 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3010     SCTP_UNUSED
3011 #endif
3012 )
3013 {
3014 	struct mbuf *m_notify;
3015 	struct sctp_send_failed *ssf;
3016 	struct sctp_send_failed_event *ssfe;
3017 	struct sctp_queued_to_read *control;
3018 	struct sctp_chunkhdr *chkhdr;
3019 	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;
3020 
3021 	if ((stcb == NULL) ||
3022 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3023 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3024 		/* event not enabled */
3025 		return;
3026 	}
3027 
3028 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3029 		notifhdr_len = sizeof(struct sctp_send_failed_event);
3030 	} else {
3031 		notifhdr_len = sizeof(struct sctp_send_failed);
3032 	}
3033 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3034 	if (m_notify == NULL)
3035 		/* no space left */
3036 		return;
3037 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3038 	if (stcb->asoc.idata_supported) {
3039 		chkhdr_len = sizeof(struct sctp_idata_chunk);
3040 	} else {
3041 		chkhdr_len = sizeof(struct sctp_data_chunk);
3042 	}
3043 	/* Use some defaults in case we can't access the chunk header */
3044 	if (chk->send_size >= chkhdr_len) {
3045 		payload_len = chk->send_size - chkhdr_len;
3046 	} else {
3047 		payload_len = 0;
3048 	}
3049 	padding_len = 0;
3050 	if (chk->data != NULL) {
3051 		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
3052 		if (chkhdr != NULL) {
3053 			chk_len = ntohs(chkhdr->chunk_length);
3054 			if ((chk_len >= chkhdr_len) &&
3055 			    (chk->send_size >= chk_len) &&
3056 			    (chk->send_size - chk_len < 4)) {
3057 				padding_len = chk->send_size - chk_len;
3058 				payload_len = chk->send_size - chkhdr_len - padding_len;
3059 			}
3060 		}
3061 	}
3062 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3063 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3064 		memset(ssfe, 0, notifhdr_len);
3065 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3066 		if (sent) {
3067 			ssfe->ssfe_flags = SCTP_DATA_SENT;
3068 		} else {
3069 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3070 		}
3071 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
3072 		ssfe->ssfe_error = error;
3073 		/* not exactly what the user sent in, but should be close :) */
3074 		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
3075 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
3076 		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
3077 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
3078 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3079 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3080 	} else {
3081 		ssf = mtod(m_notify, struct sctp_send_failed *);
3082 		memset(ssf, 0, notifhdr_len);
3083 		ssf->ssf_type = SCTP_SEND_FAILED;
3084 		if (sent) {
3085 			ssf->ssf_flags = SCTP_DATA_SENT;
3086 		} else {
3087 			ssf->ssf_flags = SCTP_DATA_UNSENT;
3088 		}
3089 		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
3090 		ssf->ssf_error = error;
3091 		/* not exactly what the user sent in, but should be close :) */
3092 		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
3093 		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
3094 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
3095 		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
3096 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
3097 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3098 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3099 	}
3100 	if (chk->data != NULL) {
3101 		/* Trim off the sctp chunk header (it should be there) */
3102 		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
3103 			m_adj(chk->data, chkhdr_len);
3104 			m_adj(chk->data, -padding_len);
3105 			sctp_mbuf_crush(chk->data);
3106 			chk->send_size -= (chkhdr_len + padding_len);
3107 		}
3108 	}
3109 	SCTP_BUF_NEXT(m_notify) = chk->data;
3110 	/* Steal off the mbuf */
3111 	chk->data = NULL;
3112 	/*
3113 	 * For this case, we check the actual socket buffer: since the assoc
3114 	 * is going away, we don't want to overfill the socket buffer for a
3115 	 * non-reader.
3116 	 */
3117 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3118 		sctp_m_freem(m_notify);
3119 		return;
3120 	}
3121 	/* append to socket */
3122 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3123 	    0, 0, stcb->asoc.context, 0, 0, 0,
3124 	    m_notify);
3125 	if (control == NULL) {
3126 		/* no memory */
3127 		sctp_m_freem(m_notify);
3128 		return;
3129 	}
3130 	control->length = SCTP_BUF_LEN(m_notify);
3131 	control->spec_flags = M_NOTIFICATION;
3132 	/* not that we need this */
3133 	control->tail_mbuf = m_notify;
3134 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3135 	    control,
3136 	    &stcb->sctp_socket->so_rcv, 1,
3137 	    SCTP_READ_LOCK_NOT_HELD,
3138 	    so_locked);
3139 }
3140 
3141 
3142 static void
3143 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3144     struct sctp_stream_queue_pending *sp, int so_locked
3145 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3146     SCTP_UNUSED
3147 #endif
3148 )
3149 {
3150 	struct mbuf *m_notify;
3151 	struct sctp_send_failed *ssf;
3152 	struct sctp_send_failed_event *ssfe;
3153 	struct sctp_queued_to_read *control;
3154 	int notifhdr_len;
3155 
3156 	if ((stcb == NULL) ||
3157 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3158 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3159 		/* event not enabled */
3160 		return;
3161 	}
3162 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3163 		notifhdr_len = sizeof(struct sctp_send_failed_event);
3164 	} else {
3165 		notifhdr_len = sizeof(struct sctp_send_failed);
3166 	}
3167 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3168 	if (m_notify == NULL) {
3169 		/* no space left */
3170 		return;
3171 	}
3172 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3173 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3174 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3175 		memset(ssfe, 0, notifhdr_len);
3176 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3177 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3178 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
3179 		ssfe->ssfe_error = error;
3180 		/* not exactly what the user sent in, but should be close :) */
3181 		ssfe->ssfe_info.snd_sid = sp->sid;
3182 		if (sp->some_taken) {
3183 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3184 		} else {
3185 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3186 		}
3187 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3188 		ssfe->ssfe_info.snd_context = sp->context;
3189 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3190 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3191 	} else {
3192 		ssf = mtod(m_notify, struct sctp_send_failed *);
3193 		memset(ssf, 0, notifhdr_len);
3194 		ssf->ssf_type = SCTP_SEND_FAILED;
3195 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3196 		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
3197 		ssf->ssf_error = error;
3198 		/* not exactly what the user sent in, but should be close :) */
3199 		ssf->ssf_info.sinfo_stream = sp->sid;
3200 		ssf->ssf_info.sinfo_ssn = 0;
3201 		if (sp->some_taken) {
3202 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3203 		} else {
3204 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3205 		}
3206 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3207 		ssf->ssf_info.sinfo_context = sp->context;
3208 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3209 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3210 	}
3211 	SCTP_BUF_NEXT(m_notify) = sp->data;
3212 
3213 	/* Steal off the mbuf */
3214 	sp->data = NULL;
3215 	/*
3216 	 * For this case, we check the actual socket buffer: since the assoc
3217 	 * is going away, we don't want to overfill the socket buffer for a
3218 	 * non-reader.
3219 	 */
3220 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3221 		sctp_m_freem(m_notify);
3222 		return;
3223 	}
3224 	/* append to socket */
3225 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3226 	    0, 0, stcb->asoc.context, 0, 0, 0,
3227 	    m_notify);
3228 	if (control == NULL) {
3229 		/* no memory */
3230 		sctp_m_freem(m_notify);
3231 		return;
3232 	}
3233 	control->length = SCTP_BUF_LEN(m_notify);
3234 	control->spec_flags = M_NOTIFICATION;
3235 	/* not that we need this */
3236 	control->tail_mbuf = m_notify;
3237 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3238 	    control,
3239 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3240 }
3241 
3242 
3243 
3244 static void
3245 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3246 {
3247 	struct mbuf *m_notify;
3248 	struct sctp_adaptation_event *sai;
3249 	struct sctp_queued_to_read *control;
3250 
3251 	if ((stcb == NULL) ||
3252 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3253 		/* event not enabled */
3254 		return;
3255 	}
3256 
3257 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaptation_event), 0, M_NOWAIT, 1, MT_DATA);
3258 	if (m_notify == NULL)
3259 		/* no space left */
3260 		return;
3261 	SCTP_BUF_LEN(m_notify) = 0;
3262 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3263 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3264 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3265 	sai->sai_flags = 0;
3266 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3267 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3268 	sai->sai_assoc_id = sctp_get_associd(stcb);
3269 
3270 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3271 	SCTP_BUF_NEXT(m_notify) = NULL;
3272 
3273 	/* append to socket */
3274 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3275 	    0, 0, stcb->asoc.context, 0, 0, 0,
3276 	    m_notify);
3277 	if (control == NULL) {
3278 		/* no memory */
3279 		sctp_m_freem(m_notify);
3280 		return;
3281 	}
3282 	control->length = SCTP_BUF_LEN(m_notify);
3283 	control->spec_flags = M_NOTIFICATION;
3284 	/* not that we need this */
3285 	control->tail_mbuf = m_notify;
3286 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3287 	    control,
3288 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3289 }
3290 
3291 /* This must always be called with the read-queue LOCKED in the INP */
3292 static void
3293 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3294     uint32_t val, int so_locked
3295 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3296     SCTP_UNUSED
3297 #endif
3298 )
3299 {
3300 	struct mbuf *m_notify;
3301 	struct sctp_pdapi_event *pdapi;
3302 	struct sctp_queued_to_read *control;
3303 	struct sockbuf *sb;
3304 
3305 	if ((stcb == NULL) ||
3306 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3307 		/* event not enabled */
3308 		return;
3309 	}
3310 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3311 		return;
3312 	}
3313 
3314 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3315 	if (m_notify == NULL)
3316 		/* no space left */
3317 		return;
3318 	SCTP_BUF_LEN(m_notify) = 0;
3319 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3320 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3321 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3322 	pdapi->pdapi_flags = 0;
3323 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3324 	pdapi->pdapi_indication = error;
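	/*
	 * The caller packs the stream id into the upper 16 bits of 'val' and
	 * the sequence number into the lower 16 bits.
	 */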
3325 	pdapi->pdapi_stream = (val >> 16);
3326 	pdapi->pdapi_seq = (val & 0x0000ffff);
3327 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3328 
3329 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3330 	SCTP_BUF_NEXT(m_notify) = NULL;
3331 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3332 	    0, 0, stcb->asoc.context, 0, 0, 0,
3333 	    m_notify);
3334 	if (control == NULL) {
3335 		/* no memory */
3336 		sctp_m_freem(m_notify);
3337 		return;
3338 	}
3339 	control->length = SCTP_BUF_LEN(m_notify);
3340 	control->spec_flags = M_NOTIFICATION;
3341 	/* not that we need this */
3342 	control->tail_mbuf = m_notify;
3343 	sb = &stcb->sctp_socket->so_rcv;
3344 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3345 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3346 	}
3347 	sctp_sballoc(stcb, sb, m_notify);
3348 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3349 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3350 	}
3351 	control->end_added = 1;
3352 	if (stcb->asoc.control_pdapi)
3353 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3354 	else {
3355 		/* we really should not see this case */
3356 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3357 	}
3358 	if (stcb->sctp_ep && stcb->sctp_socket) {
3359 		/* This should always be the case */
3360 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3361 		struct socket *so;
3362 
3363 		so = SCTP_INP_SO(stcb->sctp_ep);
3364 		if (!so_locked) {
3365 			atomic_add_int(&stcb->asoc.refcnt, 1);
3366 			SCTP_TCB_UNLOCK(stcb);
3367 			SCTP_SOCKET_LOCK(so, 1);
3368 			SCTP_TCB_LOCK(stcb);
3369 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3370 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3371 				SCTP_SOCKET_UNLOCK(so, 1);
3372 				return;
3373 			}
3374 		}
3375 #endif
3376 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3377 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3378 		if (!so_locked) {
3379 			SCTP_SOCKET_UNLOCK(so, 1);
3380 		}
3381 #endif
3382 	}
3383 }
3384 
3385 static void
3386 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3387 {
3388 	struct mbuf *m_notify;
3389 	struct sctp_shutdown_event *sse;
3390 	struct sctp_queued_to_read *control;
3391 
3392 	/*
3393 	 * For TCP model AND UDP connected sockets we will send an error up
3394 	 * when a SHUTDOWN completes.
3395 	 */
3396 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3397 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3398 		/* mark socket closed for read/write and wakeup! */
3399 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3400 		struct socket *so;
3401 
3402 		so = SCTP_INP_SO(stcb->sctp_ep);
3403 		atomic_add_int(&stcb->asoc.refcnt, 1);
3404 		SCTP_TCB_UNLOCK(stcb);
3405 		SCTP_SOCKET_LOCK(so, 1);
3406 		SCTP_TCB_LOCK(stcb);
3407 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3408 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3409 			SCTP_SOCKET_UNLOCK(so, 1);
3410 			return;
3411 		}
3412 #endif
3413 		socantsendmore(stcb->sctp_socket);
3414 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3415 		SCTP_SOCKET_UNLOCK(so, 1);
3416 #endif
3417 	}
3418 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3419 		/* event not enabled */
3420 		return;
3421 	}
3422 
3423 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3424 	if (m_notify == NULL)
3425 		/* no space left */
3426 		return;
3427 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3428 	memset(sse, 0, sizeof(struct sctp_shutdown_event));
3429 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3430 	sse->sse_flags = 0;
3431 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3432 	sse->sse_assoc_id = sctp_get_associd(stcb);
3433 
3434 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3435 	SCTP_BUF_NEXT(m_notify) = NULL;
3436 
3437 	/* append to socket */
3438 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3439 	    0, 0, stcb->asoc.context, 0, 0, 0,
3440 	    m_notify);
3441 	if (control == NULL) {
3442 		/* no memory */
3443 		sctp_m_freem(m_notify);
3444 		return;
3445 	}
3446 	control->length = SCTP_BUF_LEN(m_notify);
3447 	control->spec_flags = M_NOTIFICATION;
3448 	/* not that we need this */
3449 	control->tail_mbuf = m_notify;
3450 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3451 	    control,
3452 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3453 }
3454 
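/*
 * Queue an SCTP_SENDER_DRY_EVENT notification, used when no user data is
 * outstanding any longer.  Delivered only if the application enabled
 * SCTP_PCB_FLAGS_DRYEVNT.  An illustrative userland sketch (not part of
 * this file), assuming the RFC 6458 socket API, of how an application
 * would subscribe:
 *
 *	struct sctp_event ev;
 *
 *	memset(&ev, 0, sizeof(ev));
 *	ev.se_assoc_id = SCTP_FUTURE_ASSOC;
 *	ev.se_type = SCTP_SENDER_DRY_EVENT;
 *	ev.se_on = 1;
 *	(void)setsockopt(fd, IPPROTO_SCTP, SCTP_EVENT, &ev, sizeof(ev));
 */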
3455 static void
3456 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3457     int so_locked
3458 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3459     SCTP_UNUSED
3460 #endif
3461 )
3462 {
3463 	struct mbuf *m_notify;
3464 	struct sctp_sender_dry_event *event;
3465 	struct sctp_queued_to_read *control;
3466 
3467 	if ((stcb == NULL) ||
3468 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3469 		/* event not enabled */
3470 		return;
3471 	}
3472 
3473 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3474 	if (m_notify == NULL) {
3475 		/* no space left */
3476 		return;
3477 	}
3478 	SCTP_BUF_LEN(m_notify) = 0;
3479 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3480 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3481 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3482 	event->sender_dry_flags = 0;
3483 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3484 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3485 
3486 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3487 	SCTP_BUF_NEXT(m_notify) = NULL;
3488 
3489 	/* append to socket */
3490 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3491 	    0, 0, stcb->asoc.context, 0, 0, 0,
3492 	    m_notify);
3493 	if (control == NULL) {
3494 		/* no memory */
3495 		sctp_m_freem(m_notify);
3496 		return;
3497 	}
3498 	control->length = SCTP_BUF_LEN(m_notify);
3499 	control->spec_flags = M_NOTIFICATION;
3500 	/* not that we need this */
3501 	control->tail_mbuf = m_notify;
3502 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3503 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3504 }
3505 
3506 
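/*
 * Queue an SCTP_STREAM_CHANGE_EVENT notification carrying the current
 * number of inbound and outbound streams, if the application enabled
 * SCTP_PCB_FLAGS_STREAM_CHANGEEVNT.  Suppressed when the peer made the
 * request (see peer_req_out).
 */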
3507 void
3508 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3509 {
3510 	struct mbuf *m_notify;
3511 	struct sctp_queued_to_read *control;
3512 	struct sctp_stream_change_event *stradd;
3513 
3514 	if ((stcb == NULL) ||
3515 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3516 		/* event not enabled */
3517 		return;
3518 	}
3519 	if ((stcb->asoc.peer_req_out) && flag) {
3520 		/* Peer made the request, don't tell the local user */
3521 		stcb->asoc.peer_req_out = 0;
3522 		return;
3523 	}
3524 	stcb->asoc.peer_req_out = 0;
3525 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3526 	if (m_notify == NULL)
3527 		/* no space left */
3528 		return;
3529 	SCTP_BUF_LEN(m_notify) = 0;
3530 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3531 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3532 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3533 	stradd->strchange_flags = flag;
3534 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3535 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3536 	stradd->strchange_instrms = numberin;
3537 	stradd->strchange_outstrms = numberout;
3538 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3539 	SCTP_BUF_NEXT(m_notify) = NULL;
3540 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3541 		/* no space */
3542 		sctp_m_freem(m_notify);
3543 		return;
3544 	}
3545 	/* append to socket */
3546 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3547 	    0, 0, stcb->asoc.context, 0, 0, 0,
3548 	    m_notify);
3549 	if (control == NULL) {
3550 		/* no memory */
3551 		sctp_m_freem(m_notify);
3552 		return;
3553 	}
3554 	control->length = SCTP_BUF_LEN(m_notify);
3555 	control->spec_flags = M_NOTIFICATION;
3556 	/* not that we need this */
3557 	control->tail_mbuf = m_notify;
3558 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3559 	    control,
3560 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3561 }
3562 
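/*
 * Queue an SCTP_ASSOC_RESET_EVENT notification carrying the local and
 * remote TSNs in effect after an association (TSN) reset, if the
 * application enabled SCTP_PCB_FLAGS_ASSOC_RESETEVNT.
 */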
3563 void
3564 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3565 {
3566 	struct mbuf *m_notify;
3567 	struct sctp_queued_to_read *control;
3568 	struct sctp_assoc_reset_event *strasoc;
3569 
3570 	if ((stcb == NULL) ||
3571 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3572 		/* event not enabled */
3573 		return;
3574 	}
3575 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3576 	if (m_notify == NULL)
3577 		/* no space left */
3578 		return;
3579 	SCTP_BUF_LEN(m_notify) = 0;
3580 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3581 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3582 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3583 	strasoc->assocreset_flags = flag;
3584 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3585 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3586 	strasoc->assocreset_local_tsn = sending_tsn;
3587 	strasoc->assocreset_remote_tsn = recv_tsn;
3588 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3589 	SCTP_BUF_NEXT(m_notify) = NULL;
3590 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3591 		/* no space */
3592 		sctp_m_freem(m_notify);
3593 		return;
3594 	}
3595 	/* append to socket */
3596 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3597 	    0, 0, stcb->asoc.context, 0, 0, 0,
3598 	    m_notify);
3599 	if (control == NULL) {
3600 		/* no memory */
3601 		sctp_m_freem(m_notify);
3602 		return;
3603 	}
3604 	control->length = SCTP_BUF_LEN(m_notify);
3605 	control->spec_flags = M_NOTIFICATION;
3606 	/* not that we need this */
3607 	control->tail_mbuf = m_notify;
3608 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3609 	    control,
3610 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3611 }
3612 
3613 
3614 
3615 static void
3616 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3617     int number_entries, uint16_t *list, int flag)
3618 {
3619 	struct mbuf *m_notify;
3620 	struct sctp_queued_to_read *control;
3621 	struct sctp_stream_reset_event *strreset;
3622 	int len;
3623 
3624 	if ((stcb == NULL) ||
3625 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3626 		/* event not enabled */
3627 		return;
3628 	}
3629 
3630 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3631 	if (m_notify == NULL)
3632 		/* no space left */
3633 		return;
3634 	SCTP_BUF_LEN(m_notify) = 0;
3635 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3636 	if (len > M_TRAILINGSPACE(m_notify)) {
3637 		/* never enough room */
3638 		sctp_m_freem(m_notify);
3639 		return;
3640 	}
3641 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3642 	memset(strreset, 0, len);
3643 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3644 	strreset->strreset_flags = flag;
3645 	strreset->strreset_length = len;
3646 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3647 	if (number_entries) {
3648 		int i;
3649 
3650 		for (i = 0; i < number_entries; i++) {
3651 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3652 		}
3653 	}
3654 	SCTP_BUF_LEN(m_notify) = len;
3655 	SCTP_BUF_NEXT(m_notify) = NULL;
3656 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3657 		/* no space */
3658 		sctp_m_freem(m_notify);
3659 		return;
3660 	}
3661 	/* append to socket */
3662 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3663 	    0, 0, stcb->asoc.context, 0, 0, 0,
3664 	    m_notify);
3665 	if (control == NULL) {
3666 		/* no memory */
3667 		sctp_m_freem(m_notify);
3668 		return;
3669 	}
3670 	control->length = SCTP_BUF_LEN(m_notify);
3671 	control->spec_flags = M_NOTIFICATION;
3672 	/* not that we need this */
3673 	control->tail_mbuf = m_notify;
3674 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3675 	    control,
3676 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3677 }
3678 
3679 
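/*
 * Deliver an SCTP_REMOTE_ERROR notification carrying the error cause code
 * and, space permitting, a copy of the offending ERROR chunk (at most
 * SCTP_CHUNK_BUFFER_SIZE bytes of it).
 */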
3680 static void
3681 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3682 {
3683 	struct mbuf *m_notify;
3684 	struct sctp_remote_error *sre;
3685 	struct sctp_queued_to_read *control;
3686 	unsigned int notif_len;
3687 	uint16_t chunk_len;
3688 
3689 	if ((stcb == NULL) ||
3690 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3691 		return;
3692 	}
3693 	if (chunk != NULL) {
3694 		chunk_len = ntohs(chunk->ch.chunk_length);
3695 		/*
3696 		 * Only SCTP_CHUNK_BUFFER_SIZE bytes are guaranteed to be
3697 		 * contiguous.
3698 		 */
3699 		if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) {
3700 			chunk_len = SCTP_CHUNK_BUFFER_SIZE;
3701 		}
3702 	} else {
3703 		chunk_len = 0;
3704 	}
3705 	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
3706 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3707 	if (m_notify == NULL) {
3708 		/* Retry with smaller value. */
3709 		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
3710 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3711 		if (m_notify == NULL) {
3712 			return;
3713 		}
3714 	}
3715 	SCTP_BUF_NEXT(m_notify) = NULL;
3716 	sre = mtod(m_notify, struct sctp_remote_error *);
3717 	memset(sre, 0, notif_len);
3718 	sre->sre_type = SCTP_REMOTE_ERROR;
3719 	sre->sre_flags = 0;
3720 	sre->sre_length = sizeof(struct sctp_remote_error);
3721 	sre->sre_error = error;
3722 	sre->sre_assoc_id = sctp_get_associd(stcb);
3723 	if (notif_len > sizeof(struct sctp_remote_error)) {
3724 		memcpy(sre->sre_data, chunk, chunk_len);
3725 		sre->sre_length += chunk_len;
3726 	}
3727 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3728 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3729 	    0, 0, stcb->asoc.context, 0, 0, 0,
3730 	    m_notify);
3731 	if (control != NULL) {
3732 		control->length = SCTP_BUF_LEN(m_notify);
3733 		control->spec_flags = M_NOTIFICATION;
3734 		/* not that we need this */
3735 		control->tail_mbuf = m_notify;
3736 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3737 		    control,
3738 		    &stcb->sctp_socket->so_rcv, 1,
3739 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3740 	} else {
3741 		sctp_m_freem(m_notify);
3742 	}
3743 }
3744 
3745 
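/*
 * Central notification dispatcher: map an internal SCTP_NOTIFY_* code onto
 * the specific notification builder above.  Notifications are dropped when
 * the socket is gone, can no longer receive, or (for address events) the
 * association is still in a front state (COOKIE_WAIT/COOKIE_ECHOED).
 */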
3746 void
3747 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3748     uint32_t error, void *data, int so_locked
3749 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3750     SCTP_UNUSED
3751 #endif
3752 )
3753 {
3754 	if ((stcb == NULL) ||
3755 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3756 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3757 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3758 		/* If the socket is gone we are out of here */
3759 		return;
3760 	}
3761 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3762 		return;
3763 	}
3764 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
3765 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
3766 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3767 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3768 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3769 			/* Don't report these in front states */
3770 			return;
3771 		}
3772 	}
3773 	switch (notification) {
3774 	case SCTP_NOTIFY_ASSOC_UP:
3775 		if (stcb->asoc.assoc_up_sent == 0) {
3776 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3777 			stcb->asoc.assoc_up_sent = 1;
3778 		}
3779 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3780 			sctp_notify_adaptation_layer(stcb);
3781 		}
3782 		if (stcb->asoc.auth_supported == 0) {
3783 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3784 			    NULL, so_locked);
3785 		}
3786 		break;
3787 	case SCTP_NOTIFY_ASSOC_DOWN:
3788 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3789 		break;
3790 	case SCTP_NOTIFY_INTERFACE_DOWN:
3791 		{
3792 			struct sctp_nets *net;
3793 
3794 			net = (struct sctp_nets *)data;
3795 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3796 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3797 			break;
3798 		}
3799 	case SCTP_NOTIFY_INTERFACE_UP:
3800 		{
3801 			struct sctp_nets *net;
3802 
3803 			net = (struct sctp_nets *)data;
3804 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3805 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3806 			break;
3807 		}
3808 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3809 		{
3810 			struct sctp_nets *net;
3811 
3812 			net = (struct sctp_nets *)data;
3813 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3814 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3815 			break;
3816 		}
3817 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3818 		sctp_notify_send_failed2(stcb, error,
3819 		    (struct sctp_stream_queue_pending *)data, so_locked);
3820 		break;
3821 	case SCTP_NOTIFY_SENT_DG_FAIL:
3822 		sctp_notify_send_failed(stcb, 1, error,
3823 		    (struct sctp_tmit_chunk *)data, so_locked);
3824 		break;
3825 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3826 		sctp_notify_send_failed(stcb, 0, error,
3827 		    (struct sctp_tmit_chunk *)data, so_locked);
3828 		break;
3829 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3830 		{
3831 			uint32_t val;
3832 
3833 			val = *((uint32_t *)data);
3834 
3835 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3836 			break;
3837 		}
3838 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3839 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
3840 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
3841 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3842 		} else {
3843 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3844 		}
3845 		break;
3846 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3847 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
3848 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
3849 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3850 		} else {
3851 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3852 		}
3853 		break;
3854 	case SCTP_NOTIFY_ASSOC_RESTART:
3855 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3856 		if (stcb->asoc.auth_supported == 0) {
3857 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3858 			    NULL, so_locked);
3859 		}
3860 		break;
3861 	case SCTP_NOTIFY_STR_RESET_SEND:
3862 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
3863 		break;
3864 	case SCTP_NOTIFY_STR_RESET_RECV:
3865 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
3866 		break;
3867 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3868 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3869 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3870 		break;
3871 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3872 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3873 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3874 		break;
3875 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3876 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3877 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3878 		break;
3879 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3880 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3881 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3882 		break;
3883 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3884 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3885 		    error, so_locked);
3886 		break;
3887 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3888 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3889 		    error, so_locked);
3890 		break;
3891 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3892 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3893 		    error, so_locked);
3894 		break;
3895 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3896 		sctp_notify_shutdown_event(stcb);
3897 		break;
3898 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3899 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3900 		    (uint16_t)(uintptr_t)data,
3901 		    so_locked);
3902 		break;
3903 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3904 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3905 		    (uint16_t)(uintptr_t)data,
3906 		    so_locked);
3907 		break;
3908 	case SCTP_NOTIFY_NO_PEER_AUTH:
3909 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3910 		    (uint16_t)(uintptr_t)data,
3911 		    so_locked);
3912 		break;
3913 	case SCTP_NOTIFY_SENDER_DRY:
3914 		sctp_notify_sender_dry_event(stcb, so_locked);
3915 		break;
3916 	case SCTP_NOTIFY_REMOTE_ERROR:
3917 		sctp_notify_remote_error(stcb, error, data);
3918 		break;
3919 	default:
3920 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3921 		    __func__, notification, notification);
3922 		break;
3923 	}			/* end switch */
3924 }
3925 
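/*
 * Flush every chunk still sitting on the sent queue, the send queue and
 * each stream's output queue, telling the ULP that the data failed
 * (SCTP_NOTIFY_SENT_DG_FAIL / SCTP_NOTIFY_UNSENT_DG_FAIL /
 * SCTP_NOTIFY_SPECIAL_SP_FAIL) before freeing it.  The send lock is taken
 * here unless the caller indicates it already holds it.
 */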
3926 void
3927 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
3928 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3929     SCTP_UNUSED
3930 #endif
3931 )
3932 {
3933 	struct sctp_association *asoc;
3934 	struct sctp_stream_out *outs;
3935 	struct sctp_tmit_chunk *chk, *nchk;
3936 	struct sctp_stream_queue_pending *sp, *nsp;
3937 	int i;
3938 
3939 	if (stcb == NULL) {
3940 		return;
3941 	}
3942 	asoc = &stcb->asoc;
3943 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3944 		/* already being freed */
3945 		return;
3946 	}
3947 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3948 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3949 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
3950 		return;
3951 	}
3952 	/* now through all the gunk freeing chunks */
3953 	if (holds_lock == 0) {
3954 		SCTP_TCB_SEND_LOCK(stcb);
3955 	}
3956 	/* sent queue SHOULD be empty */
3957 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
3958 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3959 		asoc->sent_queue_cnt--;
3960 		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
3961 			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
3962 				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
3963 #ifdef INVARIANTS
3964 			} else {
3965 				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
3966 #endif
3967 			}
3968 		}
3969 		if (chk->data != NULL) {
3970 			sctp_free_bufspace(stcb, asoc, chk, 1);
3971 			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
3972 			    error, chk, so_locked);
3973 			if (chk->data) {
3974 				sctp_m_freem(chk->data);
3975 				chk->data = NULL;
3976 			}
3977 		}
3978 		sctp_free_a_chunk(stcb, chk, so_locked);
3979 		/* sa_ignore FREED_MEMORY */
3980 	}
3981 	/* pending send queue SHOULD be empty */
3982 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
3983 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3984 		asoc->send_queue_cnt--;
3985 		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
3986 			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
3987 #ifdef INVARIANTS
3988 		} else {
3989 			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
3990 #endif
3991 		}
3992 		if (chk->data != NULL) {
3993 			sctp_free_bufspace(stcb, asoc, chk, 1);
3994 			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
3995 			    error, chk, so_locked);
3996 			if (chk->data) {
3997 				sctp_m_freem(chk->data);
3998 				chk->data = NULL;
3999 			}
4000 		}
4001 		sctp_free_a_chunk(stcb, chk, so_locked);
4002 		/* sa_ignore FREED_MEMORY */
4003 	}
4004 	for (i = 0; i < asoc->streamoutcnt; i++) {
4005 		/* For each stream */
4006 		outs = &asoc->strmout[i];
4007 		/* clean up any sends there */
4008 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
4009 			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
4010 			TAILQ_REMOVE(&outs->outqueue, sp, next);
4011 			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1);
4012 			sctp_free_spbufspace(stcb, asoc, sp);
4013 			if (sp->data) {
4014 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
4015 				    error, (void *)sp, so_locked);
4016 				if (sp->data) {
4017 					sctp_m_freem(sp->data);
4018 					sp->data = NULL;
4019 					sp->tail_mbuf = NULL;
4020 					sp->length = 0;
4021 				}
4022 			}
4023 			if (sp->net) {
4024 				sctp_free_remote_addr(sp->net);
4025 				sp->net = NULL;
4026 			}
4027 			/* Free the chunk */
4028 			sctp_free_a_strmoq(stcb, sp, so_locked);
4029 			/* sa_ignore FREED_MEMORY */
4030 		}
4031 	}
4032 
4033 	if (holds_lock == 0) {
4034 		SCTP_TCB_SEND_UNLOCK(stcb);
4035 	}
4036 }
4037 
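/*
 * Tell the ULP that the association was aborted: report all outbound data
 * as failed and deliver a COMM_LOST/CANT_STR_ASSOC association change
 * notification, distinguishing a peer-initiated abort from a local one.
 * Connected TCP-model endpoints are additionally flagged with
 * SCTP_PCB_FLAGS_WAS_ABORTED.
 */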
4038 void
4039 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
4040     struct sctp_abort_chunk *abort, int so_locked
4041 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4042     SCTP_UNUSED
4043 #endif
4044 )
4045 {
4046 	if (stcb == NULL) {
4047 		return;
4048 	}
4049 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
4050 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4051 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
4052 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
4053 	}
4054 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4055 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4056 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
4057 		return;
4058 	}
4059 	/* Tell them we lost the asoc */
4060 	sctp_report_all_outbound(stcb, error, 0, so_locked);
4061 	if (from_peer) {
4062 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
4063 	} else {
4064 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
4065 	}
4066 }
4067 
4068 void
4069 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4070     struct mbuf *m, int iphlen,
4071     struct sockaddr *src, struct sockaddr *dst,
4072     struct sctphdr *sh, struct mbuf *op_err,
4073     uint8_t mflowtype, uint32_t mflowid,
4074     uint32_t vrf_id, uint16_t port)
4075 {
4076 	uint32_t vtag;
4077 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4078 	struct socket *so;
4079 #endif
4080 
4081 	vtag = 0;
4082 	if (stcb != NULL) {
4083 		vtag = stcb->asoc.peer_vtag;
4084 		vrf_id = stcb->asoc.vrf_id;
4085 	}
4086 	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
4087 	    mflowtype, mflowid, inp->fibnum,
4088 	    vrf_id, port);
4089 	if (stcb != NULL) {
4090 		/* We have a TCB to abort, send notification too */
4091 		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
4092 		SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
4093 		/* Ok, now lets free it */
4094 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4095 		so = SCTP_INP_SO(inp);
4096 		atomic_add_int(&stcb->asoc.refcnt, 1);
4097 		SCTP_TCB_UNLOCK(stcb);
4098 		SCTP_SOCKET_LOCK(so, 1);
4099 		SCTP_TCB_LOCK(stcb);
4100 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4101 #endif
4102 		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4103 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4104 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4105 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4106 		}
4107 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4108 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
4109 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4110 		SCTP_SOCKET_UNLOCK(so, 1);
4111 #endif
4112 	}
4113 }
4114 #ifdef SCTP_ASOCLOG_OF_TSNS
4115 void
4116 sctp_print_out_track_log(struct sctp_tcb *stcb)
4117 {
4118 #ifdef NOSIY_PRINTS
4119 	int i;
4120 
4121 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
4122 	SCTP_PRINTF("IN bound TSN log-aaa\n");
4123 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
4124 		SCTP_PRINTF("None rcvd\n");
4125 		goto none_in;
4126 	}
4127 	if (stcb->asoc.tsn_in_wrapped) {
4128 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
4129 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4130 			    stcb->asoc.in_tsnlog[i].tsn,
4131 			    stcb->asoc.in_tsnlog[i].strm,
4132 			    stcb->asoc.in_tsnlog[i].seq,
4133 			    stcb->asoc.in_tsnlog[i].flgs,
4134 			    stcb->asoc.in_tsnlog[i].sz);
4135 		}
4136 	}
4137 	if (stcb->asoc.tsn_in_at) {
4138 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4139 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4140 			    stcb->asoc.in_tsnlog[i].tsn,
4141 			    stcb->asoc.in_tsnlog[i].strm,
4142 			    stcb->asoc.in_tsnlog[i].seq,
4143 			    stcb->asoc.in_tsnlog[i].flgs,
4144 			    stcb->asoc.in_tsnlog[i].sz);
4145 		}
4146 	}
4147 none_in:
4148 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
4149 	if ((stcb->asoc.tsn_out_at == 0) &&
4150 	    (stcb->asoc.tsn_out_wrapped == 0)) {
4151 		SCTP_PRINTF("None sent\n");
4152 	}
4153 	if (stcb->asoc.tsn_out_wrapped) {
4154 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4155 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4156 			    stcb->asoc.out_tsnlog[i].tsn,
4157 			    stcb->asoc.out_tsnlog[i].strm,
4158 			    stcb->asoc.out_tsnlog[i].seq,
4159 			    stcb->asoc.out_tsnlog[i].flgs,
4160 			    stcb->asoc.out_tsnlog[i].sz);
4161 		}
4162 	}
4163 	if (stcb->asoc.tsn_out_at) {
4164 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4165 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4166 			    stcb->asoc.out_tsnlog[i].tsn,
4167 			    stcb->asoc.out_tsnlog[i].strm,
4168 			    stcb->asoc.out_tsnlog[i].seq,
4169 			    stcb->asoc.out_tsnlog[i].flgs,
4170 			    stcb->asoc.out_tsnlog[i].sz);
4171 		}
4172 	}
4173 #endif
4174 }
4175 #endif
4176 
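/*
 * Abort an existing association: send an ABORT chunk to the peer, notify
 * the ULP (unless the socket is already gone), update the statistics and
 * free the association.
 */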
4177 void
4178 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4179     struct mbuf *op_err,
4180     int so_locked
4181 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4182     SCTP_UNUSED
4183 #endif
4184 )
4185 {
4186 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4187 	struct socket *so;
4188 #endif
4189 
4190 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4191 	so = SCTP_INP_SO(inp);
4192 #endif
4193 	if (stcb == NULL) {
4194 		/* Got to have a TCB */
4195 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4196 			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4197 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4198 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
4199 			}
4200 		}
4201 		return;
4202 	} else {
4203 		SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
4204 	}
4205 	/* notify the peer */
4206 	sctp_send_abort_tcb(stcb, op_err, so_locked);
4207 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4208 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4209 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4210 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4211 	}
4212 	/* notify the ulp */
4213 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4214 		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4215 	}
4216 	/* now free the asoc */
4217 #ifdef SCTP_ASOCLOG_OF_TSNS
4218 	sctp_print_out_track_log(stcb);
4219 #endif
4220 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4221 	if (!so_locked) {
4222 		atomic_add_int(&stcb->asoc.refcnt, 1);
4223 		SCTP_TCB_UNLOCK(stcb);
4224 		SCTP_SOCKET_LOCK(so, 1);
4225 		SCTP_TCB_LOCK(stcb);
4226 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4227 	}
4228 #endif
4229 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4230 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4231 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4232 	if (!so_locked) {
4233 		SCTP_SOCKET_UNLOCK(so, 1);
4234 	}
4235 #endif
4236 }
4237 
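/*
 * Handle an "out of the blue" packet (RFC 4960, Section 8.4): walk its
 * chunks, answer a SHUTDOWN ACK with a SHUTDOWN COMPLETE, stay silent for
 * ABORT, SHUTDOWN COMPLETE and PACKET DROPPED, and otherwise send an ABORT
 * unless the blackhole sysctl (SCTP_BASE_SYSCTL(sctp_blackhole)) suppresses
 * the response.
 */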
4238 void
4239 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4240     struct sockaddr *src, struct sockaddr *dst,
4241     struct sctphdr *sh, struct sctp_inpcb *inp,
4242     struct mbuf *cause,
4243     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4244     uint32_t vrf_id, uint16_t port)
4245 {
4246 	struct sctp_chunkhdr *ch, chunk_buf;
4247 	unsigned int chk_length;
4248 	int contains_init_chunk;
4249 
4250 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4251 	/* Generate a TO address for future reference */
4252 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4253 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4254 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4255 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4256 		}
4257 	}
4258 	contains_init_chunk = 0;
4259 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4260 	    sizeof(*ch), (uint8_t *)&chunk_buf);
4261 	while (ch != NULL) {
4262 		chk_length = ntohs(ch->chunk_length);
4263 		if (chk_length < sizeof(*ch)) {
4264 			/* break to abort land */
4265 			break;
4266 		}
4267 		switch (ch->chunk_type) {
4268 		case SCTP_INIT:
4269 			contains_init_chunk = 1;
4270 			break;
4271 		case SCTP_PACKET_DROPPED:
4272 			/* we don't respond to pkt-dropped */
4273 			return;
4274 		case SCTP_ABORT_ASSOCIATION:
4275 			/* we don't respond with an ABORT to an ABORT */
4276 			return;
4277 		case SCTP_SHUTDOWN_COMPLETE:
4278 			/*
4279 			 * we ignore it since we are not waiting for it and
4280 			 * peer is gone
4281 			 */
4282 			return;
4283 		case SCTP_SHUTDOWN_ACK:
4284 			sctp_send_shutdown_complete2(src, dst, sh,
4285 			    mflowtype, mflowid, fibnum,
4286 			    vrf_id, port);
4287 			return;
4288 		default:
4289 			break;
4290 		}
4291 		offset += SCTP_SIZE32(chk_length);
4292 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4293 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4294 	}
4295 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4296 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4297 	    (contains_init_chunk == 0))) {
4298 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4299 		    mflowtype, mflowid, fibnum,
4300 		    vrf_id, port);
4301 	}
4302 }
4303 
4304 /*
4305  * check the inbound datagram to make sure there is not an abort inside it,
4306  * if there is return 1, else return 0.
4307  */
4308 int
4309 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
4310 {
4311 	struct sctp_chunkhdr *ch;
4312 	struct sctp_init_chunk *init_chk, chunk_buf;
4313 	int offset;
4314 	unsigned int chk_length;
4315 
4316 	offset = iphlen + sizeof(struct sctphdr);
4317 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4318 	    (uint8_t *)&chunk_buf);
4319 	while (ch != NULL) {
4320 		chk_length = ntohs(ch->chunk_length);
4321 		if (chk_length < sizeof(*ch)) {
4322 			/* packet is probably corrupt */
4323 			break;
4324 		}
4325 		/* we seem to be ok, is it an abort? */
4326 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4327 			/* yep, tell them */
4328 			return (1);
4329 		}
4330 		if (ch->chunk_type == SCTP_INITIATION) {
4331 			/* need to update the Vtag */
4332 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4333 			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
4334 			if (init_chk != NULL) {
4335 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4336 			}
4337 		}
4338 		/* Nope, move to the next chunk */
4339 		offset += SCTP_SIZE32(chk_length);
4340 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4341 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4342 	}
4343 	return (0);
4344 }
4345 
4346 /*
4347  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4348  * set (i.e. it's 0) so, create this function to compare link local scopes
4349  */
4350 #ifdef INET6
4351 uint32_t
4352 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4353 {
4354 	struct sockaddr_in6 a, b;
4355 
4356 	/* save copies */
4357 	a = *addr1;
4358 	b = *addr2;
4359 
4360 	if (a.sin6_scope_id == 0)
4361 		if (sa6_recoverscope(&a)) {
4362 			/* can't get scope, so can't match */
4363 			return (0);
4364 		}
4365 	if (b.sin6_scope_id == 0)
4366 		if (sa6_recoverscope(&b)) {
4367 			/* can't get scope, so can't match */
4368 			return (0);
4369 		}
4370 	if (a.sin6_scope_id != b.sin6_scope_id)
4371 		return (0);
4372 
4373 	return (1);
4374 }
4375 
4376 /*
4377  * returns a sockaddr_in6 with embedded scope recovered and removed
4378  */
4379 struct sockaddr_in6 *
4380 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4381 {
4382 	/* check and strip embedded scope junk */
4383 	if (addr->sin6_family == AF_INET6) {
4384 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4385 			if (addr->sin6_scope_id == 0) {
4386 				*store = *addr;
4387 				if (!sa6_recoverscope(store)) {
4388 					/* use the recovered scope */
4389 					addr = store;
4390 				}
4391 			} else {
4392 				/* else, return the original "to" addr */
4393 				in6_clearscope(&addr->sin6_addr);
4394 			}
4395 		}
4396 	}
4397 	return (addr);
4398 }
4399 #endif
4400 
4401 /*
4402  * are the two addresses the same?  currently a "scopeless" check returns: 1
4403  * if same, 0 if not
4404  */
4405 int
4406 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4407 {
4408 
4409 	/* must be valid */
4410 	if (sa1 == NULL || sa2 == NULL)
4411 		return (0);
4412 
4413 	/* must be the same family */
4414 	if (sa1->sa_family != sa2->sa_family)
4415 		return (0);
4416 
4417 	switch (sa1->sa_family) {
4418 #ifdef INET6
4419 	case AF_INET6:
4420 		{
4421 			/* IPv6 addresses */
4422 			struct sockaddr_in6 *sin6_1, *sin6_2;
4423 
4424 			sin6_1 = (struct sockaddr_in6 *)sa1;
4425 			sin6_2 = (struct sockaddr_in6 *)sa2;
4426 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4427 			    sin6_2));
4428 		}
4429 #endif
4430 #ifdef INET
4431 	case AF_INET:
4432 		{
4433 			/* IPv4 addresses */
4434 			struct sockaddr_in *sin_1, *sin_2;
4435 
4436 			sin_1 = (struct sockaddr_in *)sa1;
4437 			sin_2 = (struct sockaddr_in *)sa2;
4438 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4439 		}
4440 #endif
4441 	default:
4442 		/* we don't do these... */
4443 		return (0);
4444 	}
4445 }
4446 
4447 void
4448 sctp_print_address(struct sockaddr *sa)
4449 {
4450 #ifdef INET6
4451 	char ip6buf[INET6_ADDRSTRLEN];
4452 #endif
4453 
4454 	switch (sa->sa_family) {
4455 #ifdef INET6
4456 	case AF_INET6:
4457 		{
4458 			struct sockaddr_in6 *sin6;
4459 
4460 			sin6 = (struct sockaddr_in6 *)sa;
4461 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4462 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4463 			    ntohs(sin6->sin6_port),
4464 			    sin6->sin6_scope_id);
4465 			break;
4466 		}
4467 #endif
4468 #ifdef INET
4469 	case AF_INET:
4470 		{
4471 			struct sockaddr_in *sin;
4472 			unsigned char *p;
4473 
4474 			sin = (struct sockaddr_in *)sa;
4475 			p = (unsigned char *)&sin->sin_addr;
4476 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4477 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4478 			break;
4479 		}
4480 #endif
4481 	default:
4482 		SCTP_PRINTF("?\n");
4483 		break;
4484 	}
4485 }
4486 
4487 void
4488 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4489     struct sctp_inpcb *new_inp,
4490     struct sctp_tcb *stcb,
4491     int waitflags)
4492 {
4493 	/*
4494 	 * go through our old INP and pull off any control structures that
4495  * belong to stcb and move them to the new inp.
4496 	 */
4497 	struct socket *old_so, *new_so;
4498 	struct sctp_queued_to_read *control, *nctl;
4499 	struct sctp_readhead tmp_queue;
4500 	struct mbuf *m;
4501 	int error = 0;
4502 
4503 	old_so = old_inp->sctp_socket;
4504 	new_so = new_inp->sctp_socket;
4505 	TAILQ_INIT(&tmp_queue);
4506 	error = sblock(&old_so->so_rcv, waitflags);
4507 	if (error) {
4508 		/*
4509 		 * Gak, can't get the sblock; we have a problem.  Data will be
4510 		 * left stranded, and we don't dare look at it since the
4511 		 * other thread may be reading something.  Oh well, it's a
4512 		 * screwed up app that does a peeloff OR an accept while
4513 		 * reading from the main socket... actually it's only the
4514 		 * peeloff() case, since I think read will fail on a
4515 		 * listening socket.
4516 		 */
4517 		return;
4518 	}
4519 	/* lock the socket buffers */
4520 	SCTP_INP_READ_LOCK(old_inp);
4521 	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4522 		/* Pull off everything for our target stcb */
4523 		if (control->stcb == stcb) {
4524 			/* remove it; we want it */
4525 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4526 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4527 			m = control->data;
4528 			while (m) {
4529 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4530 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4531 				}
4532 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4533 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4534 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4535 				}
4536 				m = SCTP_BUF_NEXT(m);
4537 			}
4538 		}
4539 	}
4540 	SCTP_INP_READ_UNLOCK(old_inp);
4541 	/* Remove the sb-lock on the old socket */
4542 
4543 	sbunlock(&old_so->so_rcv);
4544 	/* Now we move them over to the new socket buffer */
4545 	SCTP_INP_READ_LOCK(new_inp);
4546 	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4547 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4548 		m = control->data;
4549 		while (m) {
4550 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4551 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4552 			}
4553 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4554 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4555 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4556 			}
4557 			m = SCTP_BUF_NEXT(m);
4558 		}
4559 	}
4560 	SCTP_INP_READ_UNLOCK(new_inp);
4561 }
4562 
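/*
 * Wake up any reader sleeping on the endpoint's socket receive buffer.  On
 * platforms that require the socket lock (__APPLE__ / SCTP_SO_LOCK_TESTING)
 * the lock is acquired here unless the caller already holds it.
 */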
4563 void
4564 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
4565     struct sctp_tcb *stcb,
4566     int so_locked
4567 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4568     SCTP_UNUSED
4569 #endif
4570 )
4571 {
4572 	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
4573 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4574 		struct socket *so;
4575 
4576 		so = SCTP_INP_SO(inp);
4577 		if (!so_locked) {
4578 			if (stcb) {
4579 				atomic_add_int(&stcb->asoc.refcnt, 1);
4580 				SCTP_TCB_UNLOCK(stcb);
4581 			}
4582 			SCTP_SOCKET_LOCK(so, 1);
4583 			if (stcb) {
4584 				SCTP_TCB_LOCK(stcb);
4585 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4586 			}
4587 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4588 				SCTP_SOCKET_UNLOCK(so, 1);
4589 				return;
4590 			}
4591 		}
4592 #endif
4593 		sctp_sorwakeup(inp, inp->sctp_socket);
4594 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4595 		if (!so_locked) {
4596 			SCTP_SOCKET_UNLOCK(so, 1);
4597 		}
4598 #endif
4599 	}
4600 }
4601 
4602 void
4603 sctp_add_to_readq(struct sctp_inpcb *inp,
4604     struct sctp_tcb *stcb,
4605     struct sctp_queued_to_read *control,
4606     struct sockbuf *sb,
4607     int end,
4608     int inp_read_lock_held,
4609     int so_locked
4610 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4611     SCTP_UNUSED
4612 #endif
4613 )
4614 {
4615 	/*
4616 	 * Here we must place the control on the end of the socket read
4617 	 * queue AND increment sb_cc so that select will work properly on
4618 	 * read.
4619 	 */
4620 	struct mbuf *m, *prev = NULL;
4621 
4622 	if (inp == NULL) {
4623 		/* Gak, TSNH!! */
4624 #ifdef INVARIANTS
4625 		panic("Gak, inp NULL on add_to_readq");
4626 #endif
4627 		return;
4628 	}
4629 	if (inp_read_lock_held == 0)
4630 		SCTP_INP_READ_LOCK(inp);
4631 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4632 		if (!control->on_strm_q) {
4633 			sctp_free_remote_addr(control->whoFrom);
4634 			if (control->data) {
4635 				sctp_m_freem(control->data);
4636 				control->data = NULL;
4637 			}
4638 			sctp_free_a_readq(stcb, control);
4639 		}
4640 		if (inp_read_lock_held == 0)
4641 			SCTP_INP_READ_UNLOCK(inp);
4642 		return;
4643 	}
4644 	if (!(control->spec_flags & M_NOTIFICATION)) {
4645 		atomic_add_int(&inp->total_recvs, 1);
4646 		if (!control->do_not_ref_stcb) {
4647 			atomic_add_int(&stcb->total_recvs, 1);
4648 		}
4649 	}
4650 	m = control->data;
4651 	control->held_length = 0;
4652 	control->length = 0;
4653 	while (m) {
4654 		if (SCTP_BUF_LEN(m) == 0) {
4655 			/* Skip mbufs with NO length */
4656 			if (prev == NULL) {
4657 				/* First one */
4658 				control->data = sctp_m_free(m);
4659 				m = control->data;
4660 			} else {
4661 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4662 				m = SCTP_BUF_NEXT(prev);
4663 			}
4664 			if (m == NULL) {
4665 				control->tail_mbuf = prev;
4666 			}
4667 			continue;
4668 		}
4669 		prev = m;
4670 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4671 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4672 		}
4673 		sctp_sballoc(stcb, sb, m);
4674 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4675 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4676 		}
4677 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4678 		m = SCTP_BUF_NEXT(m);
4679 	}
4680 	if (prev != NULL) {
4681 		control->tail_mbuf = prev;
4682 	} else {
4683 		/* Everything got collapsed out?? */
4684 		if (!control->on_strm_q) {
4685 			sctp_free_remote_addr(control->whoFrom);
4686 			sctp_free_a_readq(stcb, control);
4687 		}
4688 		if (inp_read_lock_held == 0)
4689 			SCTP_INP_READ_UNLOCK(inp);
4690 		return;
4691 	}
4692 	if (end) {
4693 		control->end_added = 1;
4694 	}
4695 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4696 	control->on_read_q = 1;
4697 	if (inp_read_lock_held == 0)
4698 		SCTP_INP_READ_UNLOCK(inp);
4699 	if (inp && inp->sctp_socket) {
4700 		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
4701 	}
4702 }
4703 
4704 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4705  *************ALTERNATE ROUTING CODE
4706  */
4707 
4708 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4709  *************ALTERNATE ROUTING CODE
4710  */
4711 
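/*
 * Build an SCTP error cause with the given cause code whose info field
 * carries the bytes of the string 'info' (without the terminating NUL).
 * Returns NULL if the code is 0, info is NULL, the text does not fit, or
 * no mbuf is available.  Illustrative use (the cause code and text are
 * only an example):
 *
 *	op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION,
 *	    "Some diagnostic text");
 *	if (op_err != NULL)
 *		... chain it into an ABORT or operation error ...
 */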
4712 struct mbuf *
4713 sctp_generate_cause(uint16_t code, char *info)
4714 {
4715 	struct mbuf *m;
4716 	struct sctp_gen_error_cause *cause;
4717 	size_t info_len;
4718 	uint16_t len;
4719 
4720 	if ((code == 0) || (info == NULL)) {
4721 		return (NULL);
4722 	}
4723 	info_len = strlen(info);
4724 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4725 		return (NULL);
4726 	}
4727 	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
4728 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4729 	if (m != NULL) {
4730 		SCTP_BUF_LEN(m) = len;
4731 		cause = mtod(m, struct sctp_gen_error_cause *);
4732 		cause->code = htons(code);
4733 		cause->length = htons(len);
4734 		memcpy(cause->info, info, info_len);
4735 	}
4736 	return (m);
4737 }
4738 
4739 struct mbuf *
4740 sctp_generate_no_user_data_cause(uint32_t tsn)
4741 {
4742 	struct mbuf *m;
4743 	struct sctp_error_no_user_data *no_user_data_cause;
4744 	uint16_t len;
4745 
4746 	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
4747 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4748 	if (m != NULL) {
4749 		SCTP_BUF_LEN(m) = len;
4750 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4751 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4752 		no_user_data_cause->cause.length = htons(len);
4753 		no_user_data_cause->tsn = htonl(tsn);
4754 	}
4755 	return (m);
4756 }
4757 
4758 #ifdef SCTP_MBCNT_LOGGING
4759 void
4760 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4761     struct sctp_tmit_chunk *tp1, int chk_cnt)
4762 {
4763 	if (tp1->data == NULL) {
4764 		return;
4765 	}
4766 	asoc->chunks_on_out_queue -= chk_cnt;
4767 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4768 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4769 		    asoc->total_output_queue_size,
4770 		    tp1->book_size,
4771 		    0,
4772 		    tp1->mbcnt);
4773 	}
4774 	if (asoc->total_output_queue_size >= tp1->book_size) {
4775 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4776 	} else {
4777 		asoc->total_output_queue_size = 0;
4778 	}
4779 
4780 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4781 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4782 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4783 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4784 		} else {
4785 			stcb->sctp_socket->so_snd.sb_cc = 0;
4786 
4787 		}
4788 	}
4789 }
4790 
4791 #endif
4792 
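/*
 * Abandon a PR-SCTP message: mark every fragment of the message that tp1
 * belongs to as SCTP_FORWARD_TSN_SKIP across the sent, send and stream-out
 * queues, notify the ULP of the failed data, bump the abandoned counters
 * and return the number of bytes released.
 */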
4793 int
4794 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4795     uint8_t sent, int so_locked
4796 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4797     SCTP_UNUSED
4798 #endif
4799 )
4800 {
4801 	struct sctp_stream_out *strq;
4802 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4803 	struct sctp_stream_queue_pending *sp;
4804 	uint32_t mid;
4805 	uint16_t sid;
4806 	uint8_t foundeom = 0;
4807 	int ret_sz = 0;
4808 	int notdone;
4809 	int do_wakeup_routine = 0;
4810 
4811 	sid = tp1->rec.data.sid;
4812 	mid = tp1->rec.data.mid;
4813 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4814 		stcb->asoc.abandoned_sent[0]++;
4815 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4816 		stcb->asoc.strmout[sid].abandoned_sent[0]++;
4817 #if defined(SCTP_DETAILED_STR_STATS)
4818 		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4819 #endif
4820 	} else {
4821 		stcb->asoc.abandoned_unsent[0]++;
4822 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4823 		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
4824 #if defined(SCTP_DETAILED_STR_STATS)
4825 		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4826 #endif
4827 	}
4828 	do {
4829 		ret_sz += tp1->book_size;
4830 		if (tp1->data != NULL) {
4831 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4832 				sctp_flight_size_decrease(tp1);
4833 				sctp_total_flight_decrease(stcb, tp1);
4834 			}
4835 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4836 			stcb->asoc.peers_rwnd += tp1->send_size;
4837 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4838 			if (sent) {
4839 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4840 			} else {
4841 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4842 			}
4843 			if (tp1->data) {
4844 				sctp_m_freem(tp1->data);
4845 				tp1->data = NULL;
4846 			}
4847 			do_wakeup_routine = 1;
4848 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4849 				stcb->asoc.sent_queue_cnt_removeable--;
4850 			}
4851 		}
4852 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4853 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4854 		    SCTP_DATA_NOT_FRAG) {
4855 			/* not frag'ed, we are done */
4856 			notdone = 0;
4857 			foundeom = 1;
4858 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4859 			/* end of frag, we are done */
4860 			notdone = 0;
4861 			foundeom = 1;
4862 		} else {
4863 			/*
4864 			 * Its a begin or middle piece, we must mark all of
4865 			 * it
4866 			 */
4867 			notdone = 1;
4868 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4869 		}
4870 	} while (tp1 && notdone);
4871 	if (foundeom == 0) {
4872 		/*
4873 		 * The multi-part message was scattered across the send and
4874 		 * sent queue.
4875 		 */
4876 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4877 			if ((tp1->rec.data.sid != sid) ||
4878 			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
4879 				break;
4880 			}
4881 			/*
4882 			 * Save to chk in case we have some on the stream out
4883 			 * queue.  If so, and we have an un-transmitted one, we
4884 			 * don't have to fudge the TSN.
4885 			 */
4886 			chk = tp1;
4887 			ret_sz += tp1->book_size;
4888 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4889 			if (sent) {
4890 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4891 			} else {
4892 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4893 			}
4894 			if (tp1->data) {
4895 				sctp_m_freem(tp1->data);
4896 				tp1->data = NULL;
4897 			}
4898 			/* No flight involved here; book the size to 0 */
4899 			tp1->book_size = 0;
4900 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4901 				foundeom = 1;
4902 			}
4903 			do_wakeup_routine = 1;
4904 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4905 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4906 			/*
4907 			 * on to the sent queue so we can wait for it to be
4908 			 * passed by.
4909 			 */
4910 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4911 			    sctp_next);
4912 			stcb->asoc.send_queue_cnt--;
4913 			stcb->asoc.sent_queue_cnt++;
4914 		}
4915 	}
4916 	if (foundeom == 0) {
4917 		/*
4918 		 * Still no eom found. That means there is stuff left on the
4919 		 * stream out queue.. yuck.
4920 		 */
4921 		SCTP_TCB_SEND_LOCK(stcb);
4922 		strq = &stcb->asoc.strmout[sid];
4923 		sp = TAILQ_FIRST(&strq->outqueue);
4924 		if (sp != NULL) {
4925 			sp->discard_rest = 1;
4926 			/*
4927 			 * We may need to put a chunk on the queue that
4928 			 * holds the TSN that would have been sent with the
4929 			 * LAST bit.
4930 			 */
4931 			if (chk == NULL) {
4932 				/* Yep, we have to */
4933 				sctp_alloc_a_chunk(stcb, chk);
4934 				if (chk == NULL) {
4935 					/*
4936 					 * we are hosed. All we can do is
4937 					 * nothing.. which will cause an
4938 					 * abort if the peer is paying
4939 					 * attention.
4940 					 */
4941 					goto oh_well;
4942 				}
4943 				memset(chk, 0, sizeof(*chk));
4944 				chk->rec.data.rcv_flags = 0;
4945 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4946 				chk->asoc = &stcb->asoc;
4947 				if (stcb->asoc.idata_supported == 0) {
4948 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4949 						chk->rec.data.mid = 0;
4950 					} else {
4951 						chk->rec.data.mid = strq->next_mid_ordered;
4952 					}
4953 				} else {
4954 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4955 						chk->rec.data.mid = strq->next_mid_unordered;
4956 					} else {
4957 						chk->rec.data.mid = strq->next_mid_ordered;
4958 					}
4959 				}
4960 				chk->rec.data.sid = sp->sid;
4961 				chk->rec.data.ppid = sp->ppid;
4962 				chk->rec.data.context = sp->context;
4963 				chk->flags = sp->act_flags;
4964 				chk->whoTo = NULL;
4965 				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4966 				strq->chunks_on_queues++;
4967 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4968 				stcb->asoc.sent_queue_cnt++;
4969 				stcb->asoc.pr_sctp_cnt++;
4970 			}
4971 			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4972 			if (sp->sinfo_flags & SCTP_UNORDERED) {
4973 				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
4974 			}
4975 			if (stcb->asoc.idata_supported == 0) {
4976 				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
4977 					strq->next_mid_ordered++;
4978 				}
4979 			} else {
4980 				if (sp->sinfo_flags & SCTP_UNORDERED) {
4981 					strq->next_mid_unordered++;
4982 				} else {
4983 					strq->next_mid_ordered++;
4984 				}
4985 			}
4986 	oh_well:
4987 			if (sp->data) {
4988 				/*
4989 				 * Pull any data to free up the SB and allow
4990 				 * the sender to "add more" while we throw
4991 				 * this away :-)
4992 				 */
4993 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4994 				ret_sz += sp->length;
4995 				do_wakeup_routine = 1;
4996 				sp->some_taken = 1;
4997 				sctp_m_freem(sp->data);
4998 				sp->data = NULL;
4999 				sp->tail_mbuf = NULL;
5000 				sp->length = 0;
5001 			}
5002 		}
5003 		SCTP_TCB_SEND_UNLOCK(stcb);
5004 	}
5005 	if (do_wakeup_routine) {
5006 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5007 		struct socket *so;
5008 
5009 		so = SCTP_INP_SO(stcb->sctp_ep);
5010 		if (!so_locked) {
5011 			atomic_add_int(&stcb->asoc.refcnt, 1);
5012 			SCTP_TCB_UNLOCK(stcb);
5013 			SCTP_SOCKET_LOCK(so, 1);
5014 			SCTP_TCB_LOCK(stcb);
5015 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
5016 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
5017 				/* assoc was freed while we were unlocked */
5018 				SCTP_SOCKET_UNLOCK(so, 1);
5019 				return (ret_sz);
5020 			}
5021 		}
5022 #endif
5023 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
5024 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5025 		if (!so_locked) {
5026 			SCTP_SOCKET_UNLOCK(so, 1);
5027 		}
5028 #endif
5029 	}
5030 	return (ret_sz);
5031 }
5032 
5033 /*
5034  * Checks to see if the given address, addr, is one that is currently known
5035  * by the kernel.  Note: it can't distinguish the same address on multiple
5036  * interfaces and doesn't handle multiple addresses with different zone/scope
5037  * ids.  Note: ifa_ifwithaddr() compares the entire sockaddr struct.
5038  */
5039 struct sctp_ifa *
5040 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
5041     int holds_lock)
5042 {
5043 	struct sctp_laddr *laddr;
5044 
5045 	if (holds_lock == 0) {
5046 		SCTP_INP_RLOCK(inp);
5047 	}
5048 
5049 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
5050 		if (laddr->ifa == NULL)
5051 			continue;
5052 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
5053 			continue;
5054 #ifdef INET
5055 		if (addr->sa_family == AF_INET) {
5056 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5057 			    laddr->ifa->address.sin.sin_addr.s_addr) {
5058 				/* found him. */
5059 				if (holds_lock == 0) {
5060 					SCTP_INP_RUNLOCK(inp);
5061 				}
5062 				return (laddr->ifa);
5064 			}
5065 		}
5066 #endif
5067 #ifdef INET6
5068 		if (addr->sa_family == AF_INET6) {
5069 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5070 			    &laddr->ifa->address.sin6)) {
5071 				/* found him. */
5072 				if (holds_lock == 0) {
5073 					SCTP_INP_RUNLOCK(inp);
5074 				}
5075 				return (laddr->ifa);
5077 			}
5078 		}
5079 #endif
5080 	}
5081 	if (holds_lock == 0) {
5082 		SCTP_INP_RUNLOCK(inp);
5083 	}
5084 	return (NULL);
5085 }
5086 
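/*
 * Fold an address into the 32-bit value used to index the per-VRF address
 * hash table: for IPv4 the address XORed with itself shifted right 16 bits,
 * for IPv6 the sum of the four 32-bit words folded the same way.
 */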
5087 uint32_t
5088 sctp_get_ifa_hash_val(struct sockaddr *addr)
5089 {
5090 	switch (addr->sa_family) {
5091 #ifdef INET
5092 	case AF_INET:
5093 		{
5094 			struct sockaddr_in *sin;
5095 
5096 			sin = (struct sockaddr_in *)addr;
5097 			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
5098 		}
5099 #endif
5100 #ifdef INET6
5101 	case AF_INET6:
5102 		{
5103 			struct sockaddr_in6 *sin6;
5104 			uint32_t hash_of_addr;
5105 
5106 			sin6 = (struct sockaddr_in6 *)addr;
5107 			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
5108 			    sin6->sin6_addr.s6_addr32[1] +
5109 			    sin6->sin6_addr.s6_addr32[2] +
5110 			    sin6->sin6_addr.s6_addr32[3]);
5111 			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
5112 			return (hash_of_addr);
5113 		}
5114 #endif
5115 	default:
5116 		break;
5117 	}
5118 	return (0);
5119 }
5120 
5121 struct sctp_ifa *
5122 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5123 {
5124 	struct sctp_ifa *sctp_ifap;
5125 	struct sctp_vrf *vrf;
5126 	struct sctp_ifalist *hash_head;
5127 	uint32_t hash_of_addr;
5128 
5129 	if (holds_lock == 0)
5130 		SCTP_IPI_ADDR_RLOCK();
5131 
5132 	vrf = sctp_find_vrf(vrf_id);
5133 	if (vrf == NULL) {
5134 		if (holds_lock == 0)
5135 			SCTP_IPI_ADDR_RUNLOCK();
5136 		return (NULL);
5137 	}
5138 
5139 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5140 
5141 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5142 	if (hash_head == NULL) {
5143 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5144 		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5145 		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5146 		sctp_print_address(addr);
5147 		SCTP_PRINTF("No such bucket for address\n");
5148 		if (holds_lock == 0)
5149 			SCTP_IPI_ADDR_RUNLOCK();
5150 
5151 		return (NULL);
5152 	}
5153 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5154 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5155 			continue;
5156 #ifdef INET
5157 		if (addr->sa_family == AF_INET) {
5158 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5159 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5160 				/* found him. */
5161 				if (holds_lock == 0)
5162 					SCTP_IPI_ADDR_RUNLOCK();
5163 				return (sctp_ifap);
5165 			}
5166 		}
5167 #endif
5168 #ifdef INET6
5169 		if (addr->sa_family == AF_INET6) {
5170 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5171 			    &sctp_ifap->address.sin6)) {
5172 				/* found him. */
5173 				if (holds_lock == 0)
5174 					SCTP_IPI_ADDR_RUNLOCK();
5175 				return (sctp_ifap);
5177 			}
5178 		}
5179 #endif
5180 	}
5181 	if (holds_lock == 0)
5182 		SCTP_IPI_ADDR_RUNLOCK();
5183 	return (NULL);
5184 }
5185 
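/*
 * Called from sctp_sorecvmsg() once the reader has freed at least
 * rwnd_req bytes of socket-buffer space.  If the association and socket
 * are still around, recompute the receive window and, when it has grown
 * by at least rwnd_req over my_last_reported_rwnd, send a window-update
 * SACK and kick the output path; otherwise just remember the deficit in
 * freed_by_sorcv_sincelast.  In effect:
 *
 *	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
 *	dif  = (rwnd >= last_reported) ? rwnd - last_reported : 0;
 *	if (dif >= rwnd_req)
 *		send a window-update SACK
 */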
5186 static void
5187 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
5188     uint32_t rwnd_req)
5189 {
5190 	/* User pulled some data, do we need a rwnd update? */
5191 	struct epoch_tracker et;
5192 	int r_unlocked = 0;
5193 	uint32_t dif, rwnd;
5194 	struct socket *so = NULL;
5195 
5196 	if (stcb == NULL)
5197 		return;
5198 
5199 	atomic_add_int(&stcb->asoc.refcnt, 1);
5200 
5201 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
5202 	    (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) {
5203 		/* Pre-check: if the association is being freed, skip the update. */
5204 		goto no_lock;
5205 	}
5206 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5207 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5208 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5209 		goto out;
5210 	}
5211 	so = stcb->sctp_socket;
5212 	if (so == NULL) {
5213 		goto out;
5214 	}
5215 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5216 	/* Have you freed enough to look? */
5217 	*freed_so_far = 0;
5218 	/* Yep, it's worth a look and the lock overhead */
5219 
5220 	/* Figure out what the rwnd would be */
5221 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5222 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5223 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5224 	} else {
5225 		dif = 0;
5226 	}
5227 	if (dif >= rwnd_req) {
5228 		if (hold_rlock) {
5229 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5230 			r_unlocked = 1;
5231 		}
5232 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5233 			/*
5234 			 * One last check before we proceed.  There is a race
5235 			 * where the association may be marked about to be freed
5236 			 * before we get here; in that case skip the update.
5237 			 */
5238 			goto out;
5239 		}
5240 		SCTP_TCB_LOCK(stcb);
5241 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5242 			/* No reports here */
5243 			SCTP_TCB_UNLOCK(stcb);
5244 			goto out;
5245 		}
5246 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5247 		NET_EPOCH_ENTER(et);
5248 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5249 
5250 		sctp_chunk_output(stcb->sctp_ep, stcb,
5251 		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5252 		/* make sure no timer is running */
5253 		NET_EPOCH_EXIT(et);
5254 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
5255 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5256 		SCTP_TCB_UNLOCK(stcb);
5257 	} else {
5258 		/* Update how much we have pending */
5259 		stcb->freed_by_sorcv_sincelast = dif;
5260 	}
5261 out:
5262 	if (so && r_unlocked && hold_rlock) {
5263 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5264 	}
5265 
5266 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5267 no_lock:
5268 	atomic_add_int(&stcb->asoc.refcnt, -1);
5269 	return;
5270 }
5271 
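/*
 * The actual receive worker.  It pulls the first suitable
 * sctp_queued_to_read off inp->read_queue, then either copies the data
 * to userland through the uio (mp == NULL) or hands the whole mbuf chain
 * back via *mp.  As data is consumed, freed_so_far is accumulated and
 * once it reaches rwnd_req (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT,
 * but at least SCTP_MIN_RWND) a window update is considered via
 * sctp_user_rcvd().
 */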
5272 int
5273 sctp_sorecvmsg(struct socket *so,
5274     struct uio *uio,
5275     struct mbuf **mp,
5276     struct sockaddr *from,
5277     int fromlen,
5278     int *msg_flags,
5279     struct sctp_sndrcvinfo *sinfo,
5280     int filling_sinfo)
5281 {
5282 	/*
5283 	 * MSG flags we will look at:
5284 	 * MSG_DONTWAIT - non-blocking IO.
5285 	 * MSG_PEEK - look but don't consume (only valid when mp == NULL, so
5286 	 *	      data is copied to userland through the uio).
5287 	 * MSG_WAITALL - ??
5288 	 * On the way out we may set any combination of MSG_NOTIFICATION and MSG_EOR.
5289 	 */
5290 	struct sctp_inpcb *inp = NULL;
5291 	ssize_t my_len = 0;
5292 	ssize_t cp_len = 0;
5293 	int error = 0;
5294 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5295 	struct mbuf *m = NULL;
5296 	struct sctp_tcb *stcb = NULL;
5297 	int wakeup_read_socket = 0;
5298 	int freecnt_applied = 0;
5299 	int out_flags = 0, in_flags = 0;
5300 	int block_allowed = 1;
5301 	uint32_t freed_so_far = 0;
5302 	ssize_t copied_so_far = 0;
5303 	int in_eeor_mode = 0;
5304 	int no_rcv_needed = 0;
5305 	uint32_t rwnd_req = 0;
5306 	int hold_sblock = 0;
5307 	int hold_rlock = 0;
5308 	ssize_t slen = 0;
5309 	uint32_t held_length = 0;
5310 	int sockbuf_lock = 0;
5311 
5312 	if (uio == NULL) {
5313 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5314 		return (EINVAL);
5315 	}
5316 
5317 	if (msg_flags) {
5318 		in_flags = *msg_flags;
5319 		if (in_flags & MSG_PEEK)
5320 			SCTP_STAT_INCR(sctps_read_peeks);
5321 	} else {
5322 		in_flags = 0;
5323 	}
5324 	slen = uio->uio_resid;
5325 
5326 	/* Pull in and set up our int flags */
5327 	if (in_flags & MSG_OOB) {
5328 		/* Out of band data is NOT supported */
5329 		return (EOPNOTSUPP);
5330 	}
5331 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5332 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5333 		return (EINVAL);
5334 	}
5335 	if ((in_flags & (MSG_DONTWAIT
5336 	    | MSG_NBIO
5337 	    )) ||
5338 	    SCTP_SO_IS_NBIO(so)) {
5339 		block_allowed = 0;
5340 	}
5341 	/* setup the endpoint */
5342 	inp = (struct sctp_inpcb *)so->so_pcb;
5343 	if (inp == NULL) {
5344 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5345 		return (EFAULT);
5346 	}
5347 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5348 	/* Must be at least an MTU's worth */
5349 	if (rwnd_req < SCTP_MIN_RWND)
5350 		rwnd_req = SCTP_MIN_RWND;
5351 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5352 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5353 		sctp_misc_ints(SCTP_SORECV_ENTER,
5354 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5355 	}
5356 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5357 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5358 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5359 	}
5360 
5361 
5362 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5363 	if (error) {
5364 		goto release_unlocked;
5365 	}
5366 	sockbuf_lock = 1;
5367 restart:
5368 
5369 
5370 restart_nosblocks:
5371 	if (hold_sblock == 0) {
5372 		SOCKBUF_LOCK(&so->so_rcv);
5373 		hold_sblock = 1;
5374 	}
5375 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5376 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5377 		goto out;
5378 	}
5379 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5380 		if (so->so_error) {
5381 			error = so->so_error;
5382 			if ((in_flags & MSG_PEEK) == 0)
5383 				so->so_error = 0;
5384 			goto out;
5385 		} else {
5386 			if (so->so_rcv.sb_cc == 0) {
5387 				/* indicate EOF */
5388 				error = 0;
5389 				goto out;
5390 			}
5391 		}
5392 	}
5393 	if (so->so_rcv.sb_cc <= held_length) {
5394 		if (so->so_error) {
5395 			error = so->so_error;
5396 			if ((in_flags & MSG_PEEK) == 0) {
5397 				so->so_error = 0;
5398 			}
5399 			goto out;
5400 		}
5401 		if ((so->so_rcv.sb_cc == 0) &&
5402 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5403 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5404 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5405 				/*
5406 				 * For active open side clear flags for
5407 				 * re-use passive open is blocked by
5408 				 * connect.
5409 				 */
5410 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5411 					/*
5412 					 * You were aborted, passive side
5413 					 * always hits here
5414 					 */
5415 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5416 					error = ECONNRESET;
5417 				}
5418 				so->so_state &= ~(SS_ISCONNECTING |
5419 				    SS_ISDISCONNECTING |
5420 				    SS_ISCONFIRMING |
5421 				    SS_ISCONNECTED);
5422 				if (error == 0) {
5423 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5424 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5425 						error = ENOTCONN;
5426 					}
5427 				}
5428 				goto out;
5429 			}
5430 		}
5431 		if (block_allowed) {
5432 			error = sbwait(&so->so_rcv);
5433 			if (error) {
5434 				goto out;
5435 			}
5436 			held_length = 0;
5437 			goto restart_nosblocks;
5438 		} else {
5439 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5440 			error = EWOULDBLOCK;
5441 			goto out;
5442 		}
5443 	}
5444 	if (hold_sblock == 1) {
5445 		SOCKBUF_UNLOCK(&so->so_rcv);
5446 		hold_sblock = 0;
5447 	}
5448 	/* we possibly have data we can read */
5449 	/* sa_ignore FREED_MEMORY */
5450 	control = TAILQ_FIRST(&inp->read_queue);
5451 	if (control == NULL) {
5452 		/*
5453 		 * This could be happening since the appender did the
5454 		 * increment but has not yet done the tailq insert onto the
5455 		 * read_queue.
5456 		 */
5457 		if (hold_rlock == 0) {
5458 			SCTP_INP_READ_LOCK(inp);
5459 		}
5460 		control = TAILQ_FIRST(&inp->read_queue);
5461 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5462 #ifdef INVARIANTS
5463 			panic("Huh, its non zero and nothing on control?");
5464 #endif
5465 			so->so_rcv.sb_cc = 0;
5466 		}
5467 		SCTP_INP_READ_UNLOCK(inp);
5468 		hold_rlock = 0;
5469 		goto restart;
5470 	}
5471 
5472 	if ((control->length == 0) &&
5473 	    (control->do_not_ref_stcb)) {
5474 		/*
5475 		 * Clean-up code for freeing an assoc that left behind a
5476 		 * pdapi... maybe a peer in EEOR mode that just closed after
5477 		 * sending and never indicated an EOR.
5478 		 */
5479 		if (hold_rlock == 0) {
5480 			hold_rlock = 1;
5481 			SCTP_INP_READ_LOCK(inp);
5482 		}
5483 		control->held_length = 0;
5484 		if (control->data) {
5485 			/* Hmm there is data here .. fix */
5486 			struct mbuf *m_tmp;
5487 			int cnt = 0;
5488 
5489 			m_tmp = control->data;
5490 			while (m_tmp) {
5491 				cnt += SCTP_BUF_LEN(m_tmp);
5492 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5493 					control->tail_mbuf = m_tmp;
5494 					control->end_added = 1;
5495 				}
5496 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5497 			}
5498 			control->length = cnt;
5499 		} else {
5500 			/* remove it */
5501 			TAILQ_REMOVE(&inp->read_queue, control, next);
5502 			/* Add back any hidden data */
5503 			sctp_free_remote_addr(control->whoFrom);
5504 			sctp_free_a_readq(stcb, control);
5505 		}
5506 		if (hold_rlock) {
5507 			hold_rlock = 0;
5508 			SCTP_INP_READ_UNLOCK(inp);
5509 		}
5510 		goto restart;
5511 	}
5512 	if ((control->length == 0) &&
5513 	    (control->end_added == 1)) {
5514 		/*
5515 		 * Do we also need to check for (control->pdapi_aborted ==
5516 		 * 1)?
5517 		 */
5518 		if (hold_rlock == 0) {
5519 			hold_rlock = 1;
5520 			SCTP_INP_READ_LOCK(inp);
5521 		}
5522 		TAILQ_REMOVE(&inp->read_queue, control, next);
5523 		if (control->data) {
5524 #ifdef INVARIANTS
5525 			panic("control->data not null but control->length == 0");
5526 #else
5527 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5528 			sctp_m_freem(control->data);
5529 			control->data = NULL;
5530 #endif
5531 		}
5532 		if (control->aux_data) {
5533 			sctp_m_free(control->aux_data);
5534 			control->aux_data = NULL;
5535 		}
5536 #ifdef INVARIANTS
5537 		if (control->on_strm_q) {
5538 			panic("About to free ctl:%p so:%p and its in %d",
5539 			    control, so, control->on_strm_q);
5540 		}
5541 #endif
5542 		sctp_free_remote_addr(control->whoFrom);
5543 		sctp_free_a_readq(stcb, control);
5544 		if (hold_rlock) {
5545 			hold_rlock = 0;
5546 			SCTP_INP_READ_UNLOCK(inp);
5547 		}
5548 		goto restart;
5549 	}
5550 	if (control->length == 0) {
5551 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5552 		    (filling_sinfo)) {
5553 			/* find a more suitable one than this */
5554 			ctl = TAILQ_NEXT(control, next);
5555 			while (ctl) {
5556 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5557 				    (ctl->some_taken ||
5558 				    (ctl->spec_flags & M_NOTIFICATION) ||
5559 				    ((ctl->do_not_ref_stcb == 0) &&
5560 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5561 				    ) {
5562 					/*-
5563 					 * If the next control is for a different TCB and has data
5564 					 * present, and we have already taken some (pdapi), OR we can
5565 					 * ref the tcb and no delivery has started on this stream, we
5566 					 * take it.  Note we allow a notification on a different
5567 					 * assoc to be delivered.
5568 					 */
5569 					control = ctl;
5570 					goto found_one;
5571 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5572 					    (ctl->length) &&
5573 					    ((ctl->some_taken) ||
5574 					    ((ctl->do_not_ref_stcb == 0) &&
5575 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5576 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5577 					/*-
5578 					 * If we have the same tcb, and there is data present, and we
5579 					 * have the strm interleave feature present. Then if we have
5580 					 * taken some (pdapi) or we can refer to that tcb AND we have
5581 					 * not started a delivery for this stream, we can take it.
5582 					 * Note we do NOT allow a notification on the same assoc to
5583 					 * be delivered.
5584 					 */
5585 					control = ctl;
5586 					goto found_one;
5587 				}
5588 				ctl = TAILQ_NEXT(ctl, next);
5589 			}
5590 		}
5591 		/*
5592 		 * If we reach here, no suitable replacement is available
5593 		 * <or> fragment interleave is NOT on.  So stuff the sb_cc
5594 		 * into our held count, and it's time to sleep again.
5595 		 */
5596 		held_length = so->so_rcv.sb_cc;
5597 		control->held_length = so->so_rcv.sb_cc;
5598 		goto restart;
5599 	}
5600 	/* Clear the held length since there is something to read */
5601 	control->held_length = 0;
5602 found_one:
5603 	/*
5604 	 * If we reach here, control has some data for us to read off.
5605 	 * Note that stcb COULD be NULL.
5606 	 */
5607 	if (hold_rlock == 0) {
5608 		hold_rlock = 1;
5609 		SCTP_INP_READ_LOCK(inp);
5610 	}
5611 	control->some_taken++;
5612 	stcb = control->stcb;
5613 	if (stcb) {
5614 		if ((control->do_not_ref_stcb == 0) &&
5615 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5616 			if (freecnt_applied == 0)
5617 				stcb = NULL;
5618 		} else if (control->do_not_ref_stcb == 0) {
5619 			/* you can't free it on me please */
5620 			/*
5621 			 * The lock on the socket buffer protects us so the
5622 			 * free code will stop. But since we used the
5623 			 * socketbuf lock and the sender uses the tcb_lock
5624 			 * to increment, we need to use the atomic add to
5625 			 * the refcnt
5626 			 */
5627 			if (freecnt_applied) {
5628 #ifdef INVARIANTS
5629 				panic("refcnt already incremented");
5630 #else
5631 				SCTP_PRINTF("refcnt already incremented?\n");
5632 #endif
5633 			} else {
5634 				atomic_add_int(&stcb->asoc.refcnt, 1);
5635 				freecnt_applied = 1;
5636 			}
5637 			/*
5638 			 * Setup to remember how much we have not yet told
5639 			 * the peer our rwnd has opened up. Note we grab the
5640 			 * value from the tcb from last time. Note too that
5641 			 * sack sending clears this when a sack is sent,
5642 			 * which is fine. Once we hit the rwnd_req, we then
5643 			 * will go to the sctp_user_rcvd() that will not
5644 			 * lock until it KNOWs it MUST send a WUP-SACK.
5645 			 */
5646 			freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast;
5647 			stcb->freed_by_sorcv_sincelast = 0;
5648 		}
5649 	}
5650 	if (stcb &&
5651 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5652 	    control->do_not_ref_stcb == 0) {
5653 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5654 	}
5655 
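	/*
	 * Fill in the sctp_sndrcvinfo from the control block: stream, SSN
	 * (taken from mid), flags, ppid, context, TTL, TSN, cumulative TSN
	 * and assoc id.  If SCTP_PCB_FLAGS_EXT_RCVINFO or
	 * SCTP_PCB_FLAGS_RECVNXTINFO is on, the next queued message is also
	 * peeked at to fill in the serinfo_next_* fields of the extended
	 * structure.
	 */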
5656 	/* First let's pull off the sinfo and sockaddr info */
5657 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5658 		sinfo->sinfo_stream = control->sinfo_stream;
5659 		sinfo->sinfo_ssn = (uint16_t)control->mid;
5660 		sinfo->sinfo_flags = control->sinfo_flags;
5661 		sinfo->sinfo_ppid = control->sinfo_ppid;
5662 		sinfo->sinfo_context = control->sinfo_context;
5663 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5664 		sinfo->sinfo_tsn = control->sinfo_tsn;
5665 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5666 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5667 		nxt = TAILQ_NEXT(control, next);
5668 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5669 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5670 			struct sctp_extrcvinfo *s_extra;
5671 
5672 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5673 			if ((nxt) &&
5674 			    (nxt->length)) {
5675 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5676 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5677 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5678 				}
5679 				if (nxt->spec_flags & M_NOTIFICATION) {
5680 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5681 				}
5682 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5683 				s_extra->serinfo_next_length = nxt->length;
5684 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5685 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5686 				if (nxt->tail_mbuf != NULL) {
5687 					if (nxt->end_added) {
5688 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5689 					}
5690 				}
5691 			} else {
5692 				/*
5693 				 * We explicitly zero these, since the memcpy
5694 				 * may have picked up other things beyond the
5695 				 * older sinfo_ fields that are on the
5696 				 * control's structure.
5697 				 */
5698 				nxt = NULL;
5699 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5700 				s_extra->serinfo_next_aid = 0;
5701 				s_extra->serinfo_next_length = 0;
5702 				s_extra->serinfo_next_ppid = 0;
5703 				s_extra->serinfo_next_stream = 0;
5704 			}
5705 		}
5706 		/*
5707 		 * update off the real current cum-ack, if we have an stcb.
5708 		 */
5709 		if ((control->do_not_ref_stcb == 0) && stcb)
5710 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5711 		/*
5712 		 * mask off the high bits, we keep the actual chunk bits in
5713 		 * there.
5714 		 */
5715 		sinfo->sinfo_flags &= 0x00ff;
5716 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5717 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5718 		}
5719 	}
5720 #ifdef SCTP_ASOCLOG_OF_TSNS
5721 	{
5722 		int index, newindex;
5723 		struct sctp_pcbtsn_rlog *entry;
5724 
5725 		do {
5726 			index = inp->readlog_index;
5727 			newindex = index + 1;
5728 			if (newindex >= SCTP_READ_LOG_SIZE) {
5729 				newindex = 0;
5730 			}
5731 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5732 		entry = &inp->readlog[index];
5733 		entry->vtag = control->sinfo_assoc_id;
5734 		entry->strm = control->sinfo_stream;
5735 		entry->seq = (uint16_t)control->mid;
5736 		entry->sz = control->length;
5737 		entry->flgs = control->sinfo_flags;
5738 	}
5739 #endif
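	/*
	 * Copy the peer's address back to the caller.  The address comes from
	 * control->whoFrom->ro._l_addr with control->port_from as the port;
	 * an IPv4 peer is returned as a v4-mapped IPv6 address when
	 * SCTP_PCB_FLAGS_NEEDS_MAPPED_V4 is set.  At most fromlen bytes are
	 * copied.
	 */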
5740 	if ((fromlen > 0) && (from != NULL)) {
5741 		union sctp_sockstore store;
5742 		size_t len;
5743 
5744 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5745 #ifdef INET6
5746 		case AF_INET6:
5747 			len = sizeof(struct sockaddr_in6);
5748 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5749 			store.sin6.sin6_port = control->port_from;
5750 			break;
5751 #endif
5752 #ifdef INET
5753 		case AF_INET:
5754 #ifdef INET6
5755 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5756 				len = sizeof(struct sockaddr_in6);
5757 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5758 				    &store.sin6);
5759 				store.sin6.sin6_port = control->port_from;
5760 			} else {
5761 				len = sizeof(struct sockaddr_in);
5762 				store.sin = control->whoFrom->ro._l_addr.sin;
5763 				store.sin.sin_port = control->port_from;
5764 			}
5765 #else
5766 			len = sizeof(struct sockaddr_in);
5767 			store.sin = control->whoFrom->ro._l_addr.sin;
5768 			store.sin.sin_port = control->port_from;
5769 #endif
5770 			break;
5771 #endif
5772 		default:
5773 			len = 0;
5774 			break;
5775 		}
5776 		memcpy(from, &store, min((size_t)fromlen, len));
5777 #ifdef INET6
5778 		{
5779 			struct sockaddr_in6 lsa6, *from6;
5780 
5781 			from6 = (struct sockaddr_in6 *)from;
5782 			sctp_recover_scope_mac(from6, (&lsa6));
5783 		}
5784 #endif
5785 	}
5786 	if (hold_rlock) {
5787 		SCTP_INP_READ_UNLOCK(inp);
5788 		hold_rlock = 0;
5789 	}
5790 	if (hold_sblock) {
5791 		SOCKBUF_UNLOCK(&so->so_rcv);
5792 		hold_sblock = 0;
5793 	}
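	/*
	 * Copy-out loop (mp == NULL): walk control->data one mbuf at a time
	 * and uiomove() up to uio_resid bytes.  Unless MSG_PEEK is set, fully
	 * consumed mbufs are released with sctp_sbfree()/sctp_m_free() and
	 * partially consumed ones are trimmed in place; freed_so_far tracks
	 * the bytes (plus MSIZE per mbuf) handed back to the socket buffer so
	 * sctp_user_rcvd() can decide on a window update.  MSG_EOR is set
	 * once the last mbuf of a completed (end_added) message is consumed.
	 */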
5794 	/* now copy out what data we can */
5795 	if (mp == NULL) {
5796 		/* copy out each mbuf in the chain up to length */
5797 get_more_data:
5798 		m = control->data;
5799 		while (m) {
5800 			/* Move out all we can */
5801 			cp_len = uio->uio_resid;
5802 			my_len = SCTP_BUF_LEN(m);
5803 			if (cp_len > my_len) {
5804 				/* not enough in this buf */
5805 				cp_len = my_len;
5806 			}
5807 			if (hold_rlock) {
5808 				SCTP_INP_READ_UNLOCK(inp);
5809 				hold_rlock = 0;
5810 			}
5811 			if (cp_len > 0)
5812 				error = uiomove(mtod(m, char *), (int)cp_len, uio);
5813 			/* re-read */
5814 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5815 				goto release;
5816 			}
5817 
5818 			if ((control->do_not_ref_stcb == 0) && stcb &&
5819 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5820 				no_rcv_needed = 1;
5821 			}
5822 			if (error) {
5823 				/* error we are out of here */
5824 				goto release;
5825 			}
5826 			SCTP_INP_READ_LOCK(inp);
5827 			hold_rlock = 1;
5828 			if (cp_len == SCTP_BUF_LEN(m)) {
5829 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5830 				    (control->end_added)) {
5831 					out_flags |= MSG_EOR;
5832 					if ((control->do_not_ref_stcb == 0) &&
5833 					    (control->stcb != NULL) &&
5834 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5835 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5836 				}
5837 				if (control->spec_flags & M_NOTIFICATION) {
5838 					out_flags |= MSG_NOTIFICATION;
5839 				}
5840 				/* we ate up the mbuf */
5841 				if (in_flags & MSG_PEEK) {
5842 					/* just looking */
5843 					m = SCTP_BUF_NEXT(m);
5844 					copied_so_far += cp_len;
5845 				} else {
5846 					/* dispose of the mbuf */
5847 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5848 						sctp_sblog(&so->so_rcv,
5849 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5850 					}
5851 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5852 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5853 						sctp_sblog(&so->so_rcv,
5854 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5855 					}
5856 					copied_so_far += cp_len;
5857 					freed_so_far += (uint32_t)cp_len;
5858 					freed_so_far += MSIZE;
5859 					atomic_subtract_int(&control->length, cp_len);
5860 					control->data = sctp_m_free(m);
5861 					m = control->data;
5862 					/*
5863 					 * We have been through it all; we must hold the
5864 					 * sb lock, so it is OK to NULL the tail.
5865 					 */
5866 					if (control->data == NULL) {
5867 #ifdef INVARIANTS
5868 						if ((control->end_added == 0) ||
5869 						    (TAILQ_NEXT(control, next) == NULL)) {
5870 							/*
5871 							 * If the end is not
5872 							 * added, OR the
5873 							 * next is NOT null
5874 							 * we MUST have the
5875 							 * lock.
5876 							 */
5877 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5878 								panic("Hmm we don't own the lock?");
5879 							}
5880 						}
5881 #endif
5882 						control->tail_mbuf = NULL;
5883 #ifdef INVARIANTS
5884 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5885 							panic("end_added, nothing left and no MSG_EOR");
5886 						}
5887 #endif
5888 					}
5889 				}
5890 			} else {
5891 				/* Do we need to trim the mbuf? */
5892 				if (control->spec_flags & M_NOTIFICATION) {
5893 					out_flags |= MSG_NOTIFICATION;
5894 				}
5895 				if ((in_flags & MSG_PEEK) == 0) {
5896 					SCTP_BUF_RESV_UF(m, cp_len);
5897 					SCTP_BUF_LEN(m) -= (int)cp_len;
5898 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5899 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len);
5900 					}
5901 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5902 					if ((control->do_not_ref_stcb == 0) &&
5903 					    stcb) {
5904 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5905 					}
5906 					copied_so_far += cp_len;
5907 					freed_so_far += (uint32_t)cp_len;
5908 					freed_so_far += MSIZE;
5909 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5910 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5911 						    SCTP_LOG_SBRESULT, 0);
5912 					}
5913 					atomic_subtract_int(&control->length, cp_len);
5914 				} else {
5915 					copied_so_far += cp_len;
5916 				}
5917 			}
5918 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5919 				break;
5920 			}
5921 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5922 			    (control->do_not_ref_stcb == 0) &&
5923 			    (freed_so_far >= rwnd_req)) {
5924 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5925 			}
5926 		}		/* end while(m) */
5927 		/*
5928 		 * At this point we have looked at it all and we either have
5929 		 * a MSG_EOR, or have read all the user wants... <OR>
5930 		 * control->length == 0.
5931 		 */
5932 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5933 			/* we are done with this control */
5934 			if (control->length == 0) {
5935 				if (control->data) {
5936 #ifdef INVARIANTS
5937 					panic("control->data not null at read eor?");
5938 #else
5939 					SCTP_PRINTF("Strange, data left in the control buffer... invariants would panic?\n");
5940 					sctp_m_freem(control->data);
5941 					control->data = NULL;
5942 #endif
5943 				}
5944 		done_with_control:
5945 				if (hold_rlock == 0) {
5946 					SCTP_INP_READ_LOCK(inp);
5947 					hold_rlock = 1;
5948 				}
5949 				TAILQ_REMOVE(&inp->read_queue, control, next);
5950 				/* Add back any hidden data */
5951 				if (control->held_length) {
5952 					held_length = 0;
5953 					control->held_length = 0;
5954 					wakeup_read_socket = 1;
5955 				}
5956 				if (control->aux_data) {
5957 					sctp_m_free(control->aux_data);
5958 					control->aux_data = NULL;
5959 				}
5960 				no_rcv_needed = control->do_not_ref_stcb;
5961 				sctp_free_remote_addr(control->whoFrom);
5962 				control->data = NULL;
5963 #ifdef INVARIANTS
5964 				if (control->on_strm_q) {
5965 					panic("About to free ctl:%p so:%p and its in %d",
5966 					    control, so, control->on_strm_q);
5967 				}
5968 #endif
5969 				sctp_free_a_readq(stcb, control);
5970 				control = NULL;
5971 				if ((freed_so_far >= rwnd_req) &&
5972 				    (no_rcv_needed == 0))
5973 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5974 
5975 			} else {
5976 				/*
5977 				 * The user did not read all of this
5978 				 * message, turn off the returned MSG_EOR
5979 				 * since we are leaving more behind on the
5980 				 * control to read.
5981 				 */
5982 #ifdef INVARIANTS
5983 				if (control->end_added &&
5984 				    (control->data == NULL) &&
5985 				    (control->tail_mbuf == NULL)) {
5986 					panic("Gak, control->length is corrupt?");
5987 				}
5988 #endif
5989 				no_rcv_needed = control->do_not_ref_stcb;
5990 				out_flags &= ~MSG_EOR;
5991 			}
5992 		}
5993 		if (out_flags & MSG_EOR) {
5994 			goto release;
5995 		}
5996 		if ((uio->uio_resid == 0) ||
5997 		    ((in_eeor_mode) &&
5998 		    (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) {
5999 			goto release;
6000 		}
6001 		/*
6002 		 * If I hit here the receiver wants more and this message is
6003 		 * NOT done (pd-api).  So two questions: can we block?  If not,
6004 		 * we are done.  Did the user NOT set MSG_WAITALL?
6005 		 */
6006 		if (block_allowed == 0) {
6007 			goto release;
6008 		}
6009 		/*
6010 		 * We need to wait for more data.  A few things to note:
6011 		 * - We don't sbunlock() so we don't get someone else reading.
6012 		 * - We must be sure to account for the case where what is
6013 		 *   added is NOT to our control when we wake up.
6014 		 */
6015 
6016 		/*
6017 		 * Do we need to tell the transport a rwnd update might be
6018 		 * needed before we go to sleep?
6019 		 */
6020 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6021 		    ((freed_so_far >= rwnd_req) &&
6022 		    (control->do_not_ref_stcb == 0) &&
6023 		    (no_rcv_needed == 0))) {
6024 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6025 		}
6026 wait_some_more:
6027 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
6028 			goto release;
6029 		}
6030 
6031 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
6032 			goto release;
6033 
6034 		if (hold_rlock == 1) {
6035 			SCTP_INP_READ_UNLOCK(inp);
6036 			hold_rlock = 0;
6037 		}
6038 		if (hold_sblock == 0) {
6039 			SOCKBUF_LOCK(&so->so_rcv);
6040 			hold_sblock = 1;
6041 		}
6042 		if ((copied_so_far) && (control->length == 0) &&
6043 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
6044 			goto release;
6045 		}
6046 		if (so->so_rcv.sb_cc <= control->held_length) {
6047 			error = sbwait(&so->so_rcv);
6048 			if (error) {
6049 				goto release;
6050 			}
6051 			control->held_length = 0;
6052 		}
6053 		if (hold_sblock) {
6054 			SOCKBUF_UNLOCK(&so->so_rcv);
6055 			hold_sblock = 0;
6056 		}
6057 		if (control->length == 0) {
6058 			/* still nothing here */
6059 			if (control->end_added == 1) {
6060 				/* the peer aborted, or is done, i.e. did a shutdown */
6061 				out_flags |= MSG_EOR;
6062 				if (control->pdapi_aborted) {
6063 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6064 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6065 
6066 					out_flags |= MSG_TRUNC;
6067 				} else {
6068 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6069 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6070 				}
6071 				goto done_with_control;
6072 			}
6073 			if (so->so_rcv.sb_cc > held_length) {
6074 				control->held_length = so->so_rcv.sb_cc;
6075 				held_length = 0;
6076 			}
6077 			goto wait_some_more;
6078 		} else if (control->data == NULL) {
6079 			/*
6080 			 * we must re-sync since data is probably being
6081 			 * added
6082 			 */
6083 			SCTP_INP_READ_LOCK(inp);
6084 			if ((control->length > 0) && (control->data == NULL)) {
6085 				/*
6086 				 * Big trouble... we have the lock and it's
6087 				 * corrupt?
6088 				 */
6089 #ifdef INVARIANTS
6090 				panic("Impossible data==NULL length !=0");
6091 #endif
6092 				out_flags |= MSG_EOR;
6093 				out_flags |= MSG_TRUNC;
6094 				control->length = 0;
6095 				SCTP_INP_READ_UNLOCK(inp);
6096 				goto done_with_control;
6097 			}
6098 			SCTP_INP_READ_UNLOCK(inp);
6099 			/* We will fall around to get more data */
6100 		}
6101 		goto get_more_data;
6102 	} else {
6103 		/*-
6104 		 * Give caller back the mbuf chain,
6105 		 * store in uio_resid the length
6106 		 */
6107 		wakeup_read_socket = 0;
6108 		if ((control->end_added == 0) ||
6109 		    (TAILQ_NEXT(control, next) == NULL)) {
6110 			/* Need to get rlock */
6111 			if (hold_rlock == 0) {
6112 				SCTP_INP_READ_LOCK(inp);
6113 				hold_rlock = 1;
6114 			}
6115 		}
6116 		if (control->end_added) {
6117 			out_flags |= MSG_EOR;
6118 			if ((control->do_not_ref_stcb == 0) &&
6119 			    (control->stcb != NULL) &&
6120 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6121 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6122 		}
6123 		if (control->spec_flags & M_NOTIFICATION) {
6124 			out_flags |= MSG_NOTIFICATION;
6125 		}
6126 		uio->uio_resid = control->length;
6127 		*mp = control->data;
6128 		m = control->data;
6129 		while (m) {
6130 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6131 				sctp_sblog(&so->so_rcv,
6132 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6133 			}
6134 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6135 			freed_so_far += (uint32_t)SCTP_BUF_LEN(m);
6136 			freed_so_far += MSIZE;
6137 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6138 				sctp_sblog(&so->so_rcv,
6139 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6140 			}
6141 			m = SCTP_BUF_NEXT(m);
6142 		}
6143 		control->data = control->tail_mbuf = NULL;
6144 		control->length = 0;
6145 		if (out_flags & MSG_EOR) {
6146 			/* Done with this control */
6147 			goto done_with_control;
6148 		}
6149 	}
6150 release:
6151 	if (hold_rlock == 1) {
6152 		SCTP_INP_READ_UNLOCK(inp);
6153 		hold_rlock = 0;
6154 	}
6155 	if (hold_sblock == 1) {
6156 		SOCKBUF_UNLOCK(&so->so_rcv);
6157 		hold_sblock = 0;
6158 	}
6159 
6160 	sbunlock(&so->so_rcv);
6161 	sockbuf_lock = 0;
6162 
6163 release_unlocked:
6164 	if (hold_sblock) {
6165 		SOCKBUF_UNLOCK(&so->so_rcv);
6166 		hold_sblock = 0;
6167 	}
6168 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6169 		if ((freed_so_far >= rwnd_req) &&
6170 		    (control && (control->do_not_ref_stcb == 0)) &&
6171 		    (no_rcv_needed == 0))
6172 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6173 	}
6174 out:
6175 	if (msg_flags) {
6176 		*msg_flags = out_flags;
6177 	}
6178 	if (((out_flags & MSG_EOR) == 0) &&
6179 	    ((in_flags & MSG_PEEK) == 0) &&
6180 	    (sinfo) &&
6181 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6182 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6183 		struct sctp_extrcvinfo *s_extra;
6184 
6185 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6186 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6187 	}
6188 	if (hold_rlock == 1) {
6189 		SCTP_INP_READ_UNLOCK(inp);
6190 	}
6191 	if (hold_sblock) {
6192 		SOCKBUF_UNLOCK(&so->so_rcv);
6193 	}
6194 	if (sockbuf_lock) {
6195 		sbunlock(&so->so_rcv);
6196 	}
6197 
6198 	if (freecnt_applied) {
6199 		/*
6200 		 * The lock on the socket buffer protects us so the free
6201 		 * code will stop. But since we used the socketbuf lock and
6202 		 * the sender uses the tcb_lock to increment, we need to use
6203 		 * the atomic add to the refcnt.
6204 		 */
6205 		if (stcb == NULL) {
6206 #ifdef INVARIANTS
6207 			panic("stcb for refcnt has gone NULL?");
6208 			goto stage_left;
6209 #else
6210 			goto stage_left;
6211 #endif
6212 		}
6213 		/* Save the value back for next time */
6214 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6215 		atomic_add_int(&stcb->asoc.refcnt, -1);
6216 	}
6217 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6218 		if (stcb) {
6219 			sctp_misc_ints(SCTP_SORECV_DONE,
6220 			    freed_so_far,
6221 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6222 			    stcb->asoc.my_rwnd,
6223 			    so->so_rcv.sb_cc);
6224 		} else {
6225 			sctp_misc_ints(SCTP_SORECV_DONE,
6226 			    freed_so_far,
6227 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6228 			    0,
6229 			    so->so_rcv.sb_cc);
6230 		}
6231 	}
6232 stage_left:
6233 	if (wakeup_read_socket) {
6234 		sctp_sorwakeup(inp, so);
6235 	}
6236 	return (error);
6237 }
6238 
6239 
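/*
 * When SCTP_MBUF_LOGGING is compiled in, these wrappers log every mbuf
 * free through sctp_log_mb() before handing the mbuf to m_free();
 * sctp_m_freem() simply walks a whole chain.
 */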
6240 #ifdef SCTP_MBUF_LOGGING
6241 struct mbuf *
6242 sctp_m_free(struct mbuf *m)
6243 {
6244 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6245 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6246 	}
6247 	return (m_free(m));
6248 }
6249 
6250 void
6251 sctp_m_freem(struct mbuf *mb)
6252 {
6253 	while (mb != NULL)
6254 		mb = sctp_m_free(mb);
6255 }
6256 
6257 #endif
6258 
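/*
 * Queue a "set peer primary" request for a local address.  The address is
 * resolved to its sctp_ifa, wrapped in a sctp_laddr work item with action
 * SCTP_SET_PRIM_ADDR, pushed onto the addr_wq list, and the ADDR_WQ timer
 * is started so the address work-queue iterator processes it for every
 * association holding the address.
 */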
6259 int
6260 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6261 {
6262 	/*
6263 	 * Given a local address.  For all associations that hold the
6264 	 * address, request a peer-set-primary.
6265 	 */
6266 	struct sctp_ifa *ifa;
6267 	struct sctp_laddr *wi;
6268 
6269 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6270 	if (ifa == NULL) {
6271 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6272 		return (EADDRNOTAVAIL);
6273 	}
6274 	/*
6275 	 * Now that we have the ifa we must awaken the iterator with this
6276 	 * message.
6277 	 */
6278 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6279 	if (wi == NULL) {
6280 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6281 		return (ENOMEM);
6282 	}
6283 	/* Now incr the count and init the wi structure */
6284 	SCTP_INCR_LADDR_COUNT();
6285 	memset(wi, 0, sizeof(*wi));
6286 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6287 	wi->ifa = ifa;
6288 	wi->action = SCTP_SET_PRIM_ADDR;
6289 	atomic_add_int(&ifa->refcount, 1);
6290 
6291 	/* Now add it to the work queue */
6292 	SCTP_WQ_ADDR_LOCK();
6293 	/*
6294 	 * Should this really be a tailq? As it is we will process the
6295 	 * newest first :-0
6296 	 */
6297 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6298 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6299 	    (struct sctp_inpcb *)NULL,
6300 	    (struct sctp_tcb *)NULL,
6301 	    (struct sctp_nets *)NULL);
6302 	SCTP_WQ_ADDR_UNLOCK();
6303 	return (0);
6304 }
6305 
6306 
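/*
 * The protocol's soreceive hook.  It sets up a local sockaddr buffer and an
 * sctp_extrcvinfo, calls sctp_sorecvmsg() to do the real work, then (when a
 * sinfo was being filled and the data was not a notification) converts the
 * sinfo into a control message with sctp_build_ctl_nchunk() and duplicates
 * the peer address with sodupsockaddr() for *psa.
 */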
6307 int
6308 sctp_soreceive(struct socket *so,
6309     struct sockaddr **psa,
6310     struct uio *uio,
6311     struct mbuf **mp0,
6312     struct mbuf **controlp,
6313     int *flagsp)
6314 {
6315 	int error, fromlen;
6316 	uint8_t sockbuf[256];
6317 	struct sockaddr *from;
6318 	struct sctp_extrcvinfo sinfo;
6319 	int filling_sinfo = 1;
6320 	int flags;
6321 	struct sctp_inpcb *inp;
6322 
6323 	inp = (struct sctp_inpcb *)so->so_pcb;
6324 	/* pickup the assoc we are reading from */
6325 	if (inp == NULL) {
6326 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6327 		return (EINVAL);
6328 	}
6329 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6330 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6331 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6332 	    (controlp == NULL)) {
6333 		/* user does not want the sndrcv ctl */
6334 		filling_sinfo = 0;
6335 	}
6336 	if (psa) {
6337 		from = (struct sockaddr *)sockbuf;
6338 		fromlen = sizeof(sockbuf);
6339 		from->sa_len = 0;
6340 	} else {
6341 		from = NULL;
6342 		fromlen = 0;
6343 	}
6344 
6345 	if (filling_sinfo) {
6346 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6347 	}
6348 	if (flagsp != NULL) {
6349 		flags = *flagsp;
6350 	} else {
6351 		flags = 0;
6352 	}
6353 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags,
6354 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6355 	if (flagsp != NULL) {
6356 		*flagsp = flags;
6357 	}
6358 	if (controlp != NULL) {
6359 		/* copy back the sinfo in a CMSG format */
6360 		if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) {
6361 			*controlp = sctp_build_ctl_nchunk(inp,
6362 			    (struct sctp_sndrcvinfo *)&sinfo);
6363 		} else {
6364 			*controlp = NULL;
6365 		}
6366 	}
6367 	if (psa) {
6368 		/* copy back the address info */
6369 		if (from && from->sa_len) {
6370 			*psa = sodupsockaddr(from, M_NOWAIT);
6371 		} else {
6372 			*psa = NULL;
6373 		}
6374 	}
6375 	return (error);
6376 }
6381 
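/*
 * Add the packed list of addresses handed to sctp_connectx() to a freshly
 * created association.  Each sockaddr is validated (no wildcard, broadcast
 * or multicast destinations) and added as a confirmed remote address via
 * sctp_add_remote_addr(); on any failure the association is freed, *error
 * is set, and the number of addresses added so far is returned.
 */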
6382 int
6383 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6384     int totaddr, int *error)
6385 {
6386 	int added = 0;
6387 	int i;
6388 	struct sctp_inpcb *inp;
6389 	struct sockaddr *sa;
6390 	size_t incr = 0;
6391 #ifdef INET
6392 	struct sockaddr_in *sin;
6393 #endif
6394 #ifdef INET6
6395 	struct sockaddr_in6 *sin6;
6396 #endif
6397 
6398 	sa = addr;
6399 	inp = stcb->sctp_ep;
6400 	*error = 0;
6401 	for (i = 0; i < totaddr; i++) {
6402 		switch (sa->sa_family) {
6403 #ifdef INET
6404 		case AF_INET:
6405 			incr = sizeof(struct sockaddr_in);
6406 			sin = (struct sockaddr_in *)sa;
6407 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6408 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6409 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6410 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6411 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6412 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
6413 				*error = EINVAL;
6414 				goto out_now;
6415 			}
6416 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6417 			    SCTP_DONOT_SETSCOPE,
6418 			    SCTP_ADDR_IS_CONFIRMED)) {
6419 				/* assoc gone no un-lock */
6420 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6421 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6422 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
6423 				*error = ENOBUFS;
6424 				goto out_now;
6425 			}
6426 			added++;
6427 			break;
6428 #endif
6429 #ifdef INET6
6430 		case AF_INET6:
6431 			incr = sizeof(struct sockaddr_in6);
6432 			sin6 = (struct sockaddr_in6 *)sa;
6433 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6434 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6435 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6436 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6437 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
6438 				*error = EINVAL;
6439 				goto out_now;
6440 			}
6441 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6442 			    SCTP_DONOT_SETSCOPE,
6443 			    SCTP_ADDR_IS_CONFIRMED)) {
6444 				/* assoc gone no un-lock */
6445 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6446 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6447 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
6448 				*error = ENOBUFS;
6449 				goto out_now;
6450 			}
6451 			added++;
6452 			break;
6453 #endif
6454 		default:
6455 			break;
6456 		}
6457 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6458 	}
6459 out_now:
6460 	return (added);
6461 }
6462 
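/*
 * Pre-validate the packed address list for sctp_connectx(): check sa_len
 * and the overall buffer limit, count IPv4 and IPv6 addresses (rejecting
 * v4-mapped IPv6), and return EALREADY if any address already belongs to
 * an existing association on this endpoint.
 */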
6463 int
6464 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6465     unsigned int totaddr,
6466     unsigned int *num_v4, unsigned int *num_v6,
6467     unsigned int limit)
6468 {
6469 	struct sockaddr *sa;
6470 	struct sctp_tcb *stcb;
6471 	unsigned int incr, at, i;
6472 
6473 	at = 0;
6474 	sa = addr;
6475 	*num_v6 = *num_v4 = 0;
6476 	/* account and validate addresses */
6477 	if (totaddr == 0) {
6478 		return (EINVAL);
6479 	}
6480 	for (i = 0; i < totaddr; i++) {
6481 		if (at + sizeof(struct sockaddr) > limit) {
6482 			return (EINVAL);
6483 		}
6484 		switch (sa->sa_family) {
6485 #ifdef INET
6486 		case AF_INET:
6487 			incr = (unsigned int)sizeof(struct sockaddr_in);
6488 			if (sa->sa_len != incr) {
6489 				return (EINVAL);
6490 			}
6491 			(*num_v4) += 1;
6492 			break;
6493 #endif
6494 #ifdef INET6
6495 		case AF_INET6:
6496 			{
6497 				struct sockaddr_in6 *sin6;
6498 
6499 				sin6 = (struct sockaddr_in6 *)sa;
6500 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6501 					/* Must be non-mapped for connectx */
6502 					return (EINVAL);
6503 				}
6504 				incr = (unsigned int)sizeof(struct sockaddr_in6);
6505 				if (sa->sa_len != incr) {
6506 					return (EINVAL);
6507 				}
6508 				(*num_v6) += 1;
6509 				break;
6510 			}
6511 #endif
6512 		default:
6513 			return (EINVAL);
6514 		}
6515 		if ((at + incr) > limit) {
6516 			return (EINVAL);
6517 		}
6518 		SCTP_INP_INCR_REF(inp);
6519 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6520 		if (stcb != NULL) {
6521 			SCTP_TCB_UNLOCK(stcb);
6522 			return (EALREADY);
6523 		} else {
6524 			SCTP_INP_DECR_REF(inp);
6525 		}
6526 		at += incr;
6527 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6528 	}
6529 	return (0);
6530 }
6531 
6532 /*
6533  * sctp_bindx(ADD) for one address.
6534  * assumes all arguments are valid/checked by caller.
6535  */
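/*
 * A v4-mapped IPv6 address is converted to plain IPv4 where the socket
 * permits it.  If the endpoint is still unbound this turns into a full
 * sctp_inpcb_bind(); otherwise the port is validated against the existing
 * local port and the address is added through
 * sctp_addr_mgmt_ep_sa(SCTP_ADD_IP_ADDRESS).  Association-specific bindx
 * (assoc_id != 0) is not implemented.
 */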
6536 void
6537 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6538     struct sockaddr *sa, sctp_assoc_t assoc_id,
6539     uint32_t vrf_id, int *error, void *p)
6540 {
6541 	struct sockaddr *addr_touse;
6542 #if defined(INET) && defined(INET6)
6543 	struct sockaddr_in sin;
6544 #endif
6545 
6546 	/* see if we're bound all already! */
6547 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6548 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6549 		*error = EINVAL;
6550 		return;
6551 	}
6552 	addr_touse = sa;
6553 #ifdef INET6
6554 	if (sa->sa_family == AF_INET6) {
6555 #ifdef INET
6556 		struct sockaddr_in6 *sin6;
6557 
6558 #endif
6559 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6560 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6561 			*error = EINVAL;
6562 			return;
6563 		}
6564 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6565 			/* can only bind v6 on PF_INET6 sockets */
6566 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6567 			*error = EINVAL;
6568 			return;
6569 		}
6570 #ifdef INET
6571 		sin6 = (struct sockaddr_in6 *)addr_touse;
6572 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6573 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6574 			    SCTP_IPV6_V6ONLY(inp)) {
6575 				/* can't bind v4-mapped on PF_INET sockets */
6576 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6577 				*error = EINVAL;
6578 				return;
6579 			}
6580 			in6_sin6_2_sin(&sin, sin6);
6581 			addr_touse = (struct sockaddr *)&sin;
6582 		}
6583 #endif
6584 	}
6585 #endif
6586 #ifdef INET
6587 	if (sa->sa_family == AF_INET) {
6588 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6589 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6590 			*error = EINVAL;
6591 			return;
6592 		}
6593 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6594 		    SCTP_IPV6_V6ONLY(inp)) {
6595 			/* can't bind v4 on PF_INET sockets */
6596 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6597 			*error = EINVAL;
6598 			return;
6599 		}
6600 	}
6601 #endif
6602 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6603 		if (p == NULL) {
6604 			/* Can't get proc for Net/Open BSD */
6605 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6606 			*error = EINVAL;
6607 			return;
6608 		}
6609 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6610 		return;
6611 	}
6612 	/*
6613 	 * No locks required here since bind and mgmt_ep_sa all do their own
6614 	 * locking. If we do something for the FIX: below we may need to
6615 	 * lock in that case.
6616 	 */
6617 	if (assoc_id == 0) {
6618 		/* add the address */
6619 		struct sctp_inpcb *lep;
6620 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6621 
6622 		/* validate the incoming port */
6623 		if ((lsin->sin_port != 0) &&
6624 		    (lsin->sin_port != inp->sctp_lport)) {
6625 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6626 			*error = EINVAL;
6627 			return;
6628 		} else {
6629 			/* user specified 0 port, set it to existing port */
6630 			lsin->sin_port = inp->sctp_lport;
6631 		}
6632 
6633 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6634 		if (lep != NULL) {
6635 			/*
6636 			 * We must decrement the refcount since we have the
6637 			 * ep already and are binding. No remove going on
6638 			 * here.
6639 			 */
6640 			SCTP_INP_DECR_REF(lep);
6641 		}
6642 		if (lep == inp) {
6643 			/* already bound to it.. ok */
6644 			return;
6645 		} else if (lep == NULL) {
6646 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6647 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6648 			    SCTP_ADD_IP_ADDRESS,
6649 			    vrf_id, NULL);
6650 		} else {
6651 			*error = EADDRINUSE;
6652 		}
6653 		if (*error)
6654 			return;
6655 	} else {
6656 		/*
6657 		 * FIX: decide whether we allow assoc based bindx
6658 		 */
6659 	}
6660 }
6661 
6662 /*
6663  * sctp_bindx(DELETE) for one address.
6664  * assumes all arguments are valid/checked by caller.
6665  */
6666 void
6667 sctp_bindx_delete_address(struct sctp_inpcb *inp,
6668     struct sockaddr *sa, sctp_assoc_t assoc_id,
6669     uint32_t vrf_id, int *error)
6670 {
6671 	struct sockaddr *addr_touse;
6672 #if defined(INET) && defined(INET6)
6673 	struct sockaddr_in sin;
6674 #endif
6675 
6676 	/* see if we're bound all already! */
6677 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6678 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6679 		*error = EINVAL;
6680 		return;
6681 	}
6682 	addr_touse = sa;
6683 #ifdef INET6
6684 	if (sa->sa_family == AF_INET6) {
6685 #ifdef INET
6686 		struct sockaddr_in6 *sin6;
6687 #endif
6688 
6689 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6690 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6691 			*error = EINVAL;
6692 			return;
6693 		}
6694 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6695 			/* can only bind v6 on PF_INET6 sockets */
6696 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6697 			*error = EINVAL;
6698 			return;
6699 		}
6700 #ifdef INET
6701 		sin6 = (struct sockaddr_in6 *)addr_touse;
6702 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6703 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6704 			    SCTP_IPV6_V6ONLY(inp)) {
6705 				/* can't bind mapped-v4 on PF_INET sockets */
6706 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6707 				*error = EINVAL;
6708 				return;
6709 			}
6710 			in6_sin6_2_sin(&sin, sin6);
6711 			addr_touse = (struct sockaddr *)&sin;
6712 		}
6713 #endif
6714 	}
6715 #endif
6716 #ifdef INET
6717 	if (sa->sa_family == AF_INET) {
6718 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6719 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6720 			*error = EINVAL;
6721 			return;
6722 		}
6723 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6724 		    SCTP_IPV6_V6ONLY(inp)) {
6725 			/* can't bind v4 on PF_INET sockets */
6726 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6727 			*error = EINVAL;
6728 			return;
6729 		}
6730 	}
6731 #endif
6732 	/*
6733 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6734 	 * below is ever changed we may need to lock before calling
6735 	 * association level binding.
6736 	 */
6737 	if (assoc_id == 0) {
6738 		/* delete the address */
6739 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6740 		    SCTP_DEL_IP_ADDRESS,
6741 		    vrf_id, NULL);
6742 	} else {
6743 		/*
6744 		 * FIX: decide whether we allow assoc based bindx
6745 		 */
6746 	}
6747 }
6748 
6749 /*
6750  * returns the valid local address count for an assoc, taking into account
6751  * all scoping rules
6752  */
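/*
 * For a bound-all endpoint every ifn/ifa in the VRF is walked and the
 * usual scope rules (loopback, private IPv4, link-local and site-local
 * IPv6) plus jail checks are applied; a subset-bound endpoint simply
 * counts the non-restricted entries on its own address list.
 */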
6753 int
6754 sctp_local_addr_count(struct sctp_tcb *stcb)
6755 {
6756 	int loopback_scope;
6757 #if defined(INET)
6758 	int ipv4_local_scope, ipv4_addr_legal;
6759 #endif
6760 #if defined (INET6)
6761 	int local_scope, site_scope, ipv6_addr_legal;
6762 #endif
6763 	struct sctp_vrf *vrf;
6764 	struct sctp_ifn *sctp_ifn;
6765 	struct sctp_ifa *sctp_ifa;
6766 	int count = 0;
6767 
6768 	/* Turn on all the appropriate scopes */
6769 	loopback_scope = stcb->asoc.scope.loopback_scope;
6770 #if defined(INET)
6771 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
6772 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
6773 #endif
6774 #if defined(INET6)
6775 	local_scope = stcb->asoc.scope.local_scope;
6776 	site_scope = stcb->asoc.scope.site_scope;
6777 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
6778 #endif
6779 	SCTP_IPI_ADDR_RLOCK();
6780 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6781 	if (vrf == NULL) {
6782 		/* no vrf, no addresses */
6783 		SCTP_IPI_ADDR_RUNLOCK();
6784 		return (0);
6785 	}
6786 
6787 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6788 		/*
6789 		 * bound all case: go through all ifns on the vrf
6790 		 */
6791 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6792 			if ((loopback_scope == 0) &&
6793 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6794 				continue;
6795 			}
6796 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6797 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6798 					continue;
6799 				switch (sctp_ifa->address.sa.sa_family) {
6800 #ifdef INET
6801 				case AF_INET:
6802 					if (ipv4_addr_legal) {
6803 						struct sockaddr_in *sin;
6804 
6805 						sin = &sctp_ifa->address.sin;
6806 						if (sin->sin_addr.s_addr == 0) {
6807 							/*
6808 							 * skip unspecified
6809 							 * addrs
6810 							 */
6811 							continue;
6812 						}
6813 						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
6814 						    &sin->sin_addr) != 0) {
6815 							continue;
6816 						}
6817 						if ((ipv4_local_scope == 0) &&
6818 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6819 							continue;
6820 						}
6821 						/* count this one */
6822 						count++;
6823 					} else {
6824 						continue;
6825 					}
6826 					break;
6827 #endif
6828 #ifdef INET6
6829 				case AF_INET6:
6830 					if (ipv6_addr_legal) {
6831 						struct sockaddr_in6 *sin6;
6832 
6833 						sin6 = &sctp_ifa->address.sin6;
6834 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6835 							continue;
6836 						}
6837 						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
6838 						    &sin6->sin6_addr) != 0) {
6839 							continue;
6840 						}
6841 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6842 							if (local_scope == 0)
6843 								continue;
6844 							if (sin6->sin6_scope_id == 0) {
6845 								if (sa6_recoverscope(sin6) != 0)
6846 									/* bad link local address */
6855 									continue;
6856 							}
6857 						}
6858 						if ((site_scope == 0) &&
6859 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6860 							continue;
6861 						}
6862 						/* count this one */
6863 						count++;
6864 					}
6865 					break;
6866 #endif
6867 				default:
6868 					/* TSNH */
6869 					break;
6870 				}
6871 			}
6872 		}
6873 	} else {
6874 		/*
6875 		 * subset bound case
6876 		 */
6877 		struct sctp_laddr *laddr;
6878 
6879 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6880 		    sctp_nxt_addr) {
6881 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6882 				continue;
6883 			}
6884 			/* count this one */
6885 			count++;
6886 		}
6887 	}
6888 	SCTP_IPI_ADDR_RUNLOCK();
6889 	return (count);
6890 }
6891 
6892 #if defined(SCTP_LOCAL_TRACE_BUF)
6893 
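/*
 * Record an entry in the global SCTP trace ring.  A slot is claimed with
 * an atomic_cmpset_int() loop on sctp_log.index (wrapping at
 * SCTP_MAX_LOGGING_SIZE), then the cycle-counter timestamp, the subsystem
 * and the six parameters are stored in that slot.
 */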
6894 void
6895 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6896 {
6897 	uint32_t saveindex, newindex;
6898 
6899 	do {
6900 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6901 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6902 			newindex = 1;
6903 		} else {
6904 			newindex = saveindex + 1;
6905 		}
6906 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6907 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6908 		saveindex = 0;
6909 	}
6910 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6911 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6912 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6913 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6914 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6915 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6916 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6917 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6918 }
6919 
6920 #endif
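
/*
 * UDP tunneling callback: handle a UDP-encapsulated SCTP packet.  The mbuf
 * chain is split right after the IP header, the remainder is checked to
 * hold at least a UDP plus SCTP header, the UDP header is trimmed off and
 * the chain is stitched back together.  ip_len / ip6_plen are adjusted,
 * CSUM_DATA_VALID is cleared (it aliases CSUM_SCTP_VALID) and the packet is
 * handed to sctp_input_with_port() or sctp6_input_with_port() together with
 * the UDP source port.
 */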
6921 static void
6922 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
6923     const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
6924 {
6925 	struct ip *iph;
6926 #ifdef INET6
6927 	struct ip6_hdr *ip6;
6928 #endif
6929 	struct mbuf *sp, *last;
6930 	struct udphdr *uhdr;
6931 	uint16_t port;
6932 
6933 	if ((m->m_flags & M_PKTHDR) == 0) {
6934 		/* Can't handle one that is not a pkt hdr */
6935 		goto out;
6936 	}
6937 	/* Pull the src port */
6938 	iph = mtod(m, struct ip *);
6939 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6940 	port = uhdr->uh_sport;
6941 	/*
6942 	 * Split out the mbuf chain. Leave the IP header in m, place the
6943 	 * rest in the sp.
6944 	 */
6945 	sp = m_split(m, off, M_NOWAIT);
6946 	if (sp == NULL) {
6947 		/* Gak, drop packet, we can't do a split */
6948 		goto out;
6949 	}
6950 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
6951 		/* Gak, packet can't have an SCTP header in it - too small */
6952 		m_freem(sp);
6953 		goto out;
6954 	}
6955 	/* Now pull up the UDP header and SCTP header together */
6956 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
6957 	if (sp == NULL) {
6958 		/* Gak pullup failed */
6959 		goto out;
6960 	}
6961 	/* Trim out the UDP header */
6962 	m_adj(sp, sizeof(struct udphdr));
6963 
6964 	/* Now reconstruct the mbuf chain */
6965 	for (last = m; last->m_next; last = last->m_next);
6966 	last->m_next = sp;
6967 	m->m_pkthdr.len += sp->m_pkthdr.len;
6968 	/*
6969 	 * The CSUM_DATA_VALID flag indicates that the HW checked the UDP
6970 	 * checksum and that it was valid. Since CSUM_DATA_VALID ==
6971 	 * CSUM_SCTP_VALID, leaving the bit set would imply that the HW also
6972 	 * verified the SCTP checksum, which it did not. Therefore, clear it.
6973 	 */
6974 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6975 	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
6976 	    m->m_pkthdr.len,
6977 	    if_name(m->m_pkthdr.rcvif),
6978 	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
6979 	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
6980 	iph = mtod(m, struct ip *);
6981 	switch (iph->ip_v) {
6982 #ifdef INET
6983 	case IPVERSION:
6984 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
6985 		sctp_input_with_port(m, off, port);
6986 		break;
6987 #endif
6988 #ifdef INET6
6989 	case IPV6_VERSION >> 4:
6990 		ip6 = mtod(m, struct ip6_hdr *);
6991 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
6992 		sctp6_input_with_port(&m, &off, port);
6993 		break;
6994 #endif
6995 	default:
6996 		goto out;
6997 		break;
6998 	}
6999 	return;
7000 out:
7001 	m_freem(m);
7002 }
7003 
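/*
 * A flat-buffer sketch of the decapsulation done above, assuming a
 * contiguous packet instead of an mbuf chain: the UDP header sitting
 * between the IP header (the first "off" bytes) and the SCTP common
 * header is cut out, so the normal SCTP input path sees the IP header
 * followed directly by SCTP.  The helper below is illustrative only and
 * is not part of the kernel code.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define UDP_HDR_LEN 8			/* sizeof(struct udphdr) */

static size_t
strip_udp_header(uint8_t *pkt, size_t pktlen, size_t off)
{
	/* Too short to even hold the UDP header: nothing to strip. */
	if (pktlen < off + UDP_HDR_LEN)
		return (0);
	/* Slide the SCTP part down over the UDP header. */
	memmove(pkt + off, pkt + off + UDP_HDR_LEN,
	    pktlen - off - UDP_HDR_LEN);
	return (pktlen - UDP_HDR_LEN);
}
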
7004 #ifdef INET
7005 static void
7006 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
7007 {
7008 	struct ip *outer_ip, *inner_ip;
7009 	struct sctphdr *sh;
7010 	struct icmp *icmp;
7011 	struct udphdr *udp;
7012 	struct sctp_inpcb *inp;
7013 	struct sctp_tcb *stcb;
7014 	struct sctp_nets *net;
7015 	struct sctp_init_chunk *ch;
7016 	struct sockaddr_in src, dst;
7017 	uint8_t type, code;
7018 
7019 	inner_ip = (struct ip *)vip;
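	/*
	 * vip points at the copy of the offending datagram's IP header that
	 * is quoted inside the ICMP message.  The ICMP header and the outer
	 * IP header precede it in the same buffer, so both are recovered by
	 * stepping backwards from inner_ip.
	 */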
7020 	icmp = (struct icmp *)((caddr_t)inner_ip -
7021 	    (sizeof(struct icmp) - sizeof(struct ip)));
7022 	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
7023 	if (ntohs(outer_ip->ip_len) <
7024 	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
7025 		return;
7026 	}
7027 	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
7028 	sh = (struct sctphdr *)(udp + 1);
7029 	memset(&src, 0, sizeof(struct sockaddr_in));
7030 	src.sin_family = AF_INET;
7031 	src.sin_len = sizeof(struct sockaddr_in);
7032 	src.sin_port = sh->src_port;
7033 	src.sin_addr = inner_ip->ip_src;
7034 	memset(&dst, 0, sizeof(struct sockaddr_in));
7035 	dst.sin_family = AF_INET;
7036 	dst.sin_len = sizeof(struct sockaddr_in);
7037 	dst.sin_port = sh->dest_port;
7038 	dst.sin_addr = inner_ip->ip_dst;
7039 	/*
7040 	 * 'dst' holds the destination of the packet that failed to be sent
7041 	 * and 'src' holds our local endpoint address. Thus we reverse the
7042 	 * dst and the src in the lookup.
7043 	 */
7044 	inp = NULL;
7045 	net = NULL;
7046 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7047 	    (struct sockaddr *)&src,
7048 	    &inp, &net, 1,
7049 	    SCTP_DEFAULT_VRFID);
7050 	if ((stcb != NULL) &&
7051 	    (net != NULL) &&
7052 	    (inp != NULL)) {
7053 		/* Check the UDP port numbers */
7054 		if ((udp->uh_dport != net->port) ||
7055 		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7056 			SCTP_TCB_UNLOCK(stcb);
7057 			return;
7058 		}
7059 		/* Check the verification tag */
7060 		if (ntohl(sh->v_tag) != 0) {
7061 			/*
7062 			 * This must be the verification tag used for
7063 			 * sending out packets. We don't consider packets
7064 			 * reflecting the verification tag.
7065 			 */
7066 			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
7067 				SCTP_TCB_UNLOCK(stcb);
7068 				return;
7069 			}
7070 		} else {
7071 			if (ntohs(outer_ip->ip_len) >=
7072 			    sizeof(struct ip) +
7073 			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
7074 				/*
7075 				 * In this case we can check if we got an
7076 				 * INIT chunk and if the initiate tag
7077 				 * matches.
7078 				 */
7079 				ch = (struct sctp_init_chunk *)(sh + 1);
7080 				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
7081 				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
7082 					SCTP_TCB_UNLOCK(stcb);
7083 					return;
7084 				}
7085 			} else {
7086 				SCTP_TCB_UNLOCK(stcb);
7087 				return;
7088 			}
7089 		}
7090 		type = icmp->icmp_type;
7091 		code = icmp->icmp_code;
7092 		if ((type == ICMP_UNREACH) &&
7093 		    (code == ICMP_UNREACH_PORT)) {
7094 			code = ICMP_UNREACH_PROTOCOL;
7095 		}
7096 		sctp_notify(inp, stcb, net, type, code,
7097 		    ntohs(inner_ip->ip_len),
7098 		    (uint32_t)ntohs(icmp->icmp_nextmtu));
7099 	} else {
7100 		if ((stcb == NULL) && (inp != NULL)) {
7101 			/* reduce ref-count */
7102 			SCTP_INP_WLOCK(inp);
7103 			SCTP_INP_DECR_REF(inp);
7104 			SCTP_INP_WUNLOCK(inp);
7105 		}
7106 		if (stcb) {
7107 			SCTP_TCB_UNLOCK(stcb);
7108 		}
7109 	}
7110 	return;
7111 }
7112 #endif
7113 
7114 #ifdef INET6
7115 static void
7116 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
7117 {
7118 	struct ip6ctlparam *ip6cp;
7119 	struct sctp_inpcb *inp;
7120 	struct sctp_tcb *stcb;
7121 	struct sctp_nets *net;
7122 	struct sctphdr sh;
7123 	struct udphdr udp;
7124 	struct sockaddr_in6 src, dst;
7125 	uint8_t type, code;
7126 
7127 	ip6cp = (struct ip6ctlparam *)d;
7128 	/*
7129 	 * XXX: We assume that when ip6c_ip6 is non-NULL, ip6c_m and ip6c_off are valid.
7130 	 */
7131 	if (ip6cp->ip6c_m == NULL) {
7132 		return;
7133 	}
7134 	/*
7135 	 * Check if we can safely examine the ports and the verification tag
7136 	 * of the SCTP common header.
7137 	 */
7138 	if (ip6cp->ip6c_m->m_pkthdr.len <
7139 	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
7140 		return;
7141 	}
7142 	/* Copy out the UDP header. */
7143 	memset(&udp, 0, sizeof(struct udphdr));
7144 	m_copydata(ip6cp->ip6c_m,
7145 	    ip6cp->ip6c_off,
7146 	    sizeof(struct udphdr),
7147 	    (caddr_t)&udp);
7148 	/* Copy out the port numbers and the verification tag. */
7149 	memset(&sh, 0, sizeof(struct sctphdr));
7150 	m_copydata(ip6cp->ip6c_m,
7151 	    ip6cp->ip6c_off + sizeof(struct udphdr),
7152 	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
7153 	    (caddr_t)&sh);
7154 	memset(&src, 0, sizeof(struct sockaddr_in6));
7155 	src.sin6_family = AF_INET6;
7156 	src.sin6_len = sizeof(struct sockaddr_in6);
7157 	src.sin6_port = sh.src_port;
7158 	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
7159 	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7160 		return;
7161 	}
7162 	memset(&dst, 0, sizeof(struct sockaddr_in6));
7163 	dst.sin6_family = AF_INET6;
7164 	dst.sin6_len = sizeof(struct sockaddr_in6);
7165 	dst.sin6_port = sh.dest_port;
7166 	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
7167 	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7168 		return;
7169 	}
7170 	inp = NULL;
7171 	net = NULL;
7172 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7173 	    (struct sockaddr *)&src,
7174 	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
7175 	if ((stcb != NULL) &&
7176 	    (net != NULL) &&
7177 	    (inp != NULL)) {
7178 		/* Check the UDP port numbers */
7179 		if ((udp.uh_dport != net->port) ||
7180 		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7181 			SCTP_TCB_UNLOCK(stcb);
7182 			return;
7183 		}
7184 		/* Check the verification tag */
7185 		if (ntohl(sh.v_tag) != 0) {
7186 			/*
7187 			 * This must be the verification tag used for
7188 			 * sending out packets. We don't consider packets
7189 			 * reflecting the verification tag.
7190 			 */
7191 			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
7192 				SCTP_TCB_UNLOCK(stcb);
7193 				return;
7194 			}
7195 		} else {
7196 			if (ip6cp->ip6c_m->m_pkthdr.len >=
7197 			    ip6cp->ip6c_off + sizeof(struct udphdr) +
7198 			    sizeof(struct sctphdr) +
7199 			    sizeof(struct sctp_chunkhdr) +
7200 			    offsetof(struct sctp_init, a_rwnd)) {
7201 				/*
7202 				 * In this case we can check if we got an
7203 				 * INIT chunk and if the initiate tag
7204 				 * matches.
7205 				 */
7206 				uint32_t initiate_tag;
7207 				uint8_t chunk_type;
7208 
7209 				m_copydata(ip6cp->ip6c_m,
7210 				    ip6cp->ip6c_off +
7211 				    sizeof(struct udphdr) +
7212 				    sizeof(struct sctphdr),
7213 				    sizeof(uint8_t),
7214 				    (caddr_t)&chunk_type);
7215 				m_copydata(ip6cp->ip6c_m,
7216 				    ip6cp->ip6c_off +
7217 				    sizeof(struct udphdr) +
7218 				    sizeof(struct sctphdr) +
7219 				    sizeof(struct sctp_chunkhdr),
7220 				    sizeof(uint32_t),
7221 				    (caddr_t)&initiate_tag);
7222 				if ((chunk_type != SCTP_INITIATION) ||
7223 				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
7224 					SCTP_TCB_UNLOCK(stcb);
7225 					return;
7226 				}
7227 			} else {
7228 				SCTP_TCB_UNLOCK(stcb);
7229 				return;
7230 			}
7231 		}
7232 		type = ip6cp->ip6c_icmp6->icmp6_type;
7233 		code = ip6cp->ip6c_icmp6->icmp6_code;
7234 		if ((type == ICMP6_DST_UNREACH) &&
7235 		    (code == ICMP6_DST_UNREACH_NOPORT)) {
7236 			type = ICMP6_PARAM_PROB;
7237 			code = ICMP6_PARAMPROB_NEXTHEADER;
7238 		}
7239 		sctp6_notify(inp, stcb, net, type, code,
7240 		    ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
7241 	} else {
7242 		if ((stcb == NULL) && (inp != NULL)) {
7243 			/* reduce inp's ref-count */
7244 			SCTP_INP_WLOCK(inp);
7245 			SCTP_INP_DECR_REF(inp);
7246 			SCTP_INP_WUNLOCK(inp);
7247 		}
7248 		if (stcb) {
7249 			SCTP_TCB_UNLOCK(stcb);
7250 		}
7251 	}
7252 }
7253 #endif
7254 
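/*
 * Both ICMP handlers above only act on an error if the quoted SCTP
 * common header can be tied to an existing association: a non-zero
 * verification tag must equal the tag we use towards the peer, and a
 * zero tag is only accepted when the quoted chunk is an INIT whose
 * initiate tag matches our own tag.  A condensed sketch of that check,
 * with plain integers standing in for the kernel structures (the names
 * below are illustrative, not kernel identifiers):
 */
#include <stdbool.h>
#include <stdint.h>

struct assoc_tags {
	uint32_t my_vtag;	/* tag the peer must use towards us */
	uint32_t peer_vtag;	/* tag we use towards the peer */
};

static bool
icmp_quote_is_trusted(const struct assoc_tags *tags, uint32_t quoted_vtag,
    bool quoted_chunk_is_init, uint32_t quoted_initiate_tag)
{
	if (quoted_vtag != 0) {
		/* The error quotes one of our own outgoing packets. */
		return (quoted_vtag == tags->peer_vtag);
	}
	/* A zero vtag is only legitimate on an INIT chunk. */
	return (quoted_chunk_is_init &&
	    quoted_initiate_tag == tags->my_vtag);
}
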
7255 void
7256 sctp_over_udp_stop(void)
7257 {
7258 	/*
7259 	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock()
7260 	 * for writing.
7261 	 */
7262 #ifdef INET
7263 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7264 		soclose(SCTP_BASE_INFO(udp4_tun_socket));
7265 		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
7266 	}
7267 #endif
7268 #ifdef INET6
7269 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7270 		soclose(SCTP_BASE_INFO(udp6_tun_socket));
7271 		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
7272 	}
7273 #endif
7274 }
7275 
7276 int
7277 sctp_over_udp_start(void)
7278 {
7279 	uint16_t port;
7280 	int ret;
7281 #ifdef INET
7282 	struct sockaddr_in sin;
7283 #endif
7284 #ifdef INET6
7285 	struct sockaddr_in6 sin6;
7286 #endif
7287 	/*
7288 	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock()
7289 	 * for writing.
7290 	 */
7291 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7292 	if (ntohs(port) == 0) {
7293 		/* Must have a port set */
7294 		return (EINVAL);
7295 	}
7296 #ifdef INET
7297 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7298 		/* Already running -- must stop first */
7299 		return (EALREADY);
7300 	}
7301 #endif
7302 #ifdef INET6
7303 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7304 		/* Already running -- must stop first */
7305 		return (EALREADY);
7306 	}
7307 #endif
7308 #ifdef INET
7309 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7310 	    SOCK_DGRAM, IPPROTO_UDP,
7311 	    curthread->td_ucred, curthread))) {
7312 		sctp_over_udp_stop();
7313 		return (ret);
7314 	}
7315 	/* Call the special UDP hook. */
7316 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7317 	    sctp_recv_udp_tunneled_packet,
7318 	    sctp_recv_icmp_tunneled_packet,
7319 	    NULL))) {
7320 		sctp_over_udp_stop();
7321 		return (ret);
7322 	}
7323 	/* Ok, we have a socket, bind it to the port. */
7324 	memset(&sin, 0, sizeof(struct sockaddr_in));
7325 	sin.sin_len = sizeof(struct sockaddr_in);
7326 	sin.sin_family = AF_INET;
7327 	sin.sin_port = htons(port);
7328 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7329 	    (struct sockaddr *)&sin, curthread))) {
7330 		sctp_over_udp_stop();
7331 		return (ret);
7332 	}
7333 #endif
7334 #ifdef INET6
7335 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7336 	    SOCK_DGRAM, IPPROTO_UDP,
7337 	    curthread->td_ucred, curthread))) {
7338 		sctp_over_udp_stop();
7339 		return (ret);
7340 	}
7341 	/* Call the special UDP hook. */
7342 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7343 	    sctp_recv_udp_tunneled_packet,
7344 	    sctp_recv_icmp6_tunneled_packet,
7345 	    NULL))) {
7346 		sctp_over_udp_stop();
7347 		return (ret);
7348 	}
7349 	/* Ok, we have a socket, bind it to the port. */
7350 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7351 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7352 	sin6.sin6_family = AF_INET6;
7353 	sin6.sin6_port = htons(port);
7354 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7355 	    (struct sockaddr *)&sin6, curthread))) {
7356 		sctp_over_udp_stop();
7357 		return (ret);
7358 	}
7359 #endif
7360 	return (0);
7361 }
7362 
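/*
 * sctp_over_udp_start() and sctp_over_udp_stop() are normally driven by
 * the net.inet.sctp.udp_tunneling_port sysctl: writing a non-zero port
 * creates the tunneling sockets, writing 0 tears them down.  A user-space
 * sketch of flipping that knob (the helper name is made up; 9899 is the
 * port registered for SCTP tunneling, but any free UDP port works):
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdint.h>

static int
set_sctp_udp_tunneling_port(uint32_t port)
{
	/* Set-only sysctl write: no old value is read back. */
	return (sysctlbyname("net.inet.sctp.udp_tunneling_port",
	    NULL, NULL, &port, sizeof(port)));
}

/*
 * Example: set_sctp_udp_tunneling_port(9899) to start tunneling,
 * set_sctp_udp_tunneling_port(0) to stop it again.
 */
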
7363 /*
7364  * sctp_min_mtu() returns the minimum of all non-zero arguments.
7365  * If all arguments are zero, zero is returned.
7366  */
7367 uint32_t
7368 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
7369 {
7370 	if (mtu1 > 0) {
7371 		if (mtu2 > 0) {
7372 			if (mtu3 > 0) {
7373 				return (min(mtu1, min(mtu2, mtu3)));
7374 			} else {
7375 				return (min(mtu1, mtu2));
7376 			}
7377 		} else {
7378 			if (mtu3 > 0) {
7379 				return (min(mtu1, mtu3));
7380 			} else {
7381 				return (mtu1);
7382 			}
7383 		}
7384 	} else {
7385 		if (mtu2 > 0) {
7386 			if (mtu3 > 0) {
7387 				return (min(mtu2, mtu3));
7388 			} else {
7389 				return (mtu2);
7390 			}
7391 		} else {
7392 			return (mtu3);
7393 		}
7394 	}
7395 }
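
/*
 * For example, sctp_min_mtu(0, 1400, 9000) yields 1400, while
 * sctp_min_mtu(0, 0, 0) yields 0 (no usable MTU information at all).
 */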
7396 
7397 void
7398 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
7399 {
7400 	struct in_conninfo inc;
7401 
7402 	memset(&inc, 0, sizeof(struct in_conninfo));
7403 	inc.inc_fibnum = fibnum;
7404 	switch (addr->sa.sa_family) {
7405 #ifdef INET
7406 	case AF_INET:
7407 		inc.inc_faddr = addr->sin.sin_addr;
7408 		break;
7409 #endif
7410 #ifdef INET6
7411 	case AF_INET6:
7412 		inc.inc_flags |= INC_ISIPV6;
7413 		inc.inc6_faddr = addr->sin6.sin6_addr;
7414 		break;
7415 #endif
7416 	default:
7417 		return;
7418 	}
7419 	tcp_hc_updatemtu(&inc, (u_long)mtu);
7420 }
7421 
7422 uint32_t
7423 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
7424 {
7425 	struct in_conninfo inc;
7426 
7427 	memset(&inc, 0, sizeof(struct in_conninfo));
7428 	inc.inc_fibnum = fibnum;
7429 	switch (addr->sa.sa_family) {
7430 #ifdef INET
7431 	case AF_INET:
7432 		inc.inc_faddr = addr->sin.sin_addr;
7433 		break;
7434 #endif
7435 #ifdef INET6
7436 	case AF_INET6:
7437 		inc.inc_flags |= INC_ISIPV6;
7438 		inc.inc6_faddr = addr->sin6.sin6_addr;
7439 		break;
7440 #endif
7441 	default:
7442 		return (0);
7443 	}
7444 	return ((uint32_t)tcp_hc_getmtu(&inc));
7445 }
7446 
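/*
 * Both host-cache helpers above key the shared TCP host cache purely by
 * the peer's address and the FIB number, so an MTU stored for a peer is
 * visible to later lookups for the same peer.  A small illustrative
 * fragment (the function name is made up; 192.0.2.1 is a TEST-NET-1
 * documentation address):
 */
static uint32_t
example_hc_mtu_roundtrip(void)
{
	union sctp_sockstore peer;

	memset(&peer, 0, sizeof(peer));
	peer.sin.sin_family = AF_INET;
	peer.sin.sin_len = sizeof(struct sockaddr_in);
	peer.sin.sin_addr.s_addr = htonl(0xc0000201);	/* 192.0.2.1 */
	sctp_hc_set_mtu(&peer, 0, 1400);
	return (sctp_hc_get_mtu(&peer, 0));	/* 1400 while the entry lives */
}
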
7447 void
7448 sctp_set_state(struct sctp_tcb *stcb, int new_state)
7449 {
7450 #if defined(KDTRACE_HOOKS)
7451 	int old_state = stcb->asoc.state;
7452 #endif
7453 
7454 	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
7455 	    ("sctp_set_state: Can't set substate (new_state = %x)",
7456 	    new_state));
7457 	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
7458 	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
7459 	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
7460 	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
7461 		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
7462 	}
7463 #if defined(KDTRACE_HOOKS)
7464 	if (((old_state & SCTP_STATE_MASK) != new_state) &&
7465 	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
7466 	    (new_state == SCTP_STATE_INUSE))) {
7467 		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
7468 	}
7469 #endif
7470 }
7471 
7472 void
7473 sctp_add_substate(struct sctp_tcb *stcb, int substate)
7474 {
7475 #if defined(KDTRACE_HOOKS)
7476 	int old_state = stcb->asoc.state;
7477 #endif
7478 
7479 	KASSERT((substate & SCTP_STATE_MASK) == 0,
7480 	    ("sctp_add_substate: Can't set state (substate = %x)",
7481 	    substate));
7482 	stcb->asoc.state |= substate;
7483 #if defined(KDTRACE_HOOKS)
7484 	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
7485 	    ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
7486 	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
7487 	    ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
7488 		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
7489 	}
7490 #endif
7491 }
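
/*
 * The association state word packs one base state (the bits covered by
 * SCTP_STATE_MASK, written via sctp_set_state()) together with
 * independent substate flag bits (set via sctp_add_substate()).  A
 * condensed sketch of how a caller might combine and read them back
 * (the helper name is made up for illustration):
 */
static bool
example_open_but_draining(struct sctp_tcb *stcb)
{
	/*
	 * e.g. after sctp_set_state(stcb, SCTP_STATE_OPEN) and
	 * sctp_add_substate(stcb, SCTP_STATE_SHUTDOWN_PENDING):
	 */
	return (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_OPEN) &&
	    ((stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING) != 0));
}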
7492