xref: /freebsd/sys/netinet/sctputil.c (revision 05ab65497e06edd12683163480841aa9630b9d4c)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #include <netinet6/sctp6_var.h>
45 #endif
46 #include <netinet/sctp_header.h>
47 #include <netinet/sctp_output.h>
48 #include <netinet/sctp_uio.h>
49 #include <netinet/sctp_timer.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_auth.h>
52 #include <netinet/sctp_asconf.h>
53 #include <netinet/sctp_bsd_addr.h>
54 #include <netinet/sctp_kdtrace.h>
55 #if defined(INET6) || defined(INET)
56 #include <netinet/tcp_var.h>
57 #endif
58 #include <netinet/udp.h>
59 #include <netinet/udp_var.h>
60 #include <sys/proc.h>
61 #ifdef INET6
62 #include <netinet/icmp6.h>
63 #endif
64 
65 
66 #ifndef KTR_SCTP
67 #define KTR_SCTP KTR_SUBSYS
68 #endif
69 
70 extern const struct sctp_cc_functions sctp_cc_functions[];
71 extern const struct sctp_ss_functions sctp_ss_functions[];
72 
73 void
74 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
75 {
76 #if defined(SCTP_LOCAL_TRACE_BUF)
77 	struct sctp_cwnd_log sctp_clog;
78 
79 	sctp_clog.x.sb.stcb = stcb;
80 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
81 	if (stcb)
82 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
83 	else
84 		sctp_clog.x.sb.stcb_sbcc = 0;
85 	sctp_clog.x.sb.incr = incr;
86 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
87 	    SCTP_LOG_EVENT_SB,
88 	    from,
89 	    sctp_clog.x.misc.log1,
90 	    sctp_clog.x.misc.log2,
91 	    sctp_clog.x.misc.log3,
92 	    sctp_clog.x.misc.log4);
93 #endif
94 }
95 
96 void
97 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
98 {
99 #if defined(SCTP_LOCAL_TRACE_BUF)
100 	struct sctp_cwnd_log sctp_clog;
101 
102 	sctp_clog.x.close.inp = (void *)inp;
103 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
104 	if (stcb) {
105 		sctp_clog.x.close.stcb = (void *)stcb;
106 		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
107 	} else {
108 		sctp_clog.x.close.stcb = 0;
109 		sctp_clog.x.close.state = 0;
110 	}
111 	sctp_clog.x.close.loc = loc;
112 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
113 	    SCTP_LOG_EVENT_CLOSE,
114 	    0,
115 	    sctp_clog.x.misc.log1,
116 	    sctp_clog.x.misc.log2,
117 	    sctp_clog.x.misc.log3,
118 	    sctp_clog.x.misc.log4);
119 #endif
120 }
121 
122 void
123 rto_logging(struct sctp_nets *net, int from)
124 {
125 #if defined(SCTP_LOCAL_TRACE_BUF)
126 	struct sctp_cwnd_log sctp_clog;
127 
128 	memset(&sctp_clog, 0, sizeof(sctp_clog));
129 	sctp_clog.x.rto.net = (void *)net;
130 	sctp_clog.x.rto.rtt = net->rtt / 1000;
131 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
132 	    SCTP_LOG_EVENT_RTT,
133 	    from,
134 	    sctp_clog.x.misc.log1,
135 	    sctp_clog.x.misc.log2,
136 	    sctp_clog.x.misc.log3,
137 	    sctp_clog.x.misc.log4);
138 #endif
139 }
140 
141 void
142 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
143 {
144 #if defined(SCTP_LOCAL_TRACE_BUF)
145 	struct sctp_cwnd_log sctp_clog;
146 
147 	sctp_clog.x.strlog.stcb = stcb;
148 	sctp_clog.x.strlog.n_tsn = tsn;
149 	sctp_clog.x.strlog.n_sseq = sseq;
150 	sctp_clog.x.strlog.e_tsn = 0;
151 	sctp_clog.x.strlog.e_sseq = 0;
152 	sctp_clog.x.strlog.strm = stream;
153 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
154 	    SCTP_LOG_EVENT_STRM,
155 	    from,
156 	    sctp_clog.x.misc.log1,
157 	    sctp_clog.x.misc.log2,
158 	    sctp_clog.x.misc.log3,
159 	    sctp_clog.x.misc.log4);
160 #endif
161 }
162 
163 void
164 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
165 {
166 #if defined(SCTP_LOCAL_TRACE_BUF)
167 	struct sctp_cwnd_log sctp_clog;
168 
169 	sctp_clog.x.nagle.stcb = (void *)stcb;
170 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
171 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
172 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
173 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
174 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
175 	    SCTP_LOG_EVENT_NAGLE,
176 	    action,
177 	    sctp_clog.x.misc.log1,
178 	    sctp_clog.x.misc.log2,
179 	    sctp_clog.x.misc.log3,
180 	    sctp_clog.x.misc.log4);
181 #endif
182 }
183 
184 void
185 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
186 {
187 #if defined(SCTP_LOCAL_TRACE_BUF)
188 	struct sctp_cwnd_log sctp_clog;
189 
190 	sctp_clog.x.sack.cumack = cumack;
191 	sctp_clog.x.sack.oldcumack = old_cumack;
192 	sctp_clog.x.sack.tsn = tsn;
193 	sctp_clog.x.sack.numGaps = gaps;
194 	sctp_clog.x.sack.numDups = dups;
195 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
196 	    SCTP_LOG_EVENT_SACK,
197 	    from,
198 	    sctp_clog.x.misc.log1,
199 	    sctp_clog.x.misc.log2,
200 	    sctp_clog.x.misc.log3,
201 	    sctp_clog.x.misc.log4);
202 #endif
203 }
204 
205 void
206 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
207 {
208 #if defined(SCTP_LOCAL_TRACE_BUF)
209 	struct sctp_cwnd_log sctp_clog;
210 
211 	memset(&sctp_clog, 0, sizeof(sctp_clog));
212 	sctp_clog.x.map.base = map;
213 	sctp_clog.x.map.cum = cum;
214 	sctp_clog.x.map.high = high;
215 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
216 	    SCTP_LOG_EVENT_MAP,
217 	    from,
218 	    sctp_clog.x.misc.log1,
219 	    sctp_clog.x.misc.log2,
220 	    sctp_clog.x.misc.log3,
221 	    sctp_clog.x.misc.log4);
222 #endif
223 }
224 
225 void
226 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
227 {
228 #if defined(SCTP_LOCAL_TRACE_BUF)
229 	struct sctp_cwnd_log sctp_clog;
230 
231 	memset(&sctp_clog, 0, sizeof(sctp_clog));
232 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
233 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
234 	sctp_clog.x.fr.tsn = tsn;
235 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
236 	    SCTP_LOG_EVENT_FR,
237 	    from,
238 	    sctp_clog.x.misc.log1,
239 	    sctp_clog.x.misc.log2,
240 	    sctp_clog.x.misc.log3,
241 	    sctp_clog.x.misc.log4);
242 #endif
243 }
244 
245 #ifdef SCTP_MBUF_LOGGING
246 void
247 sctp_log_mb(struct mbuf *m, int from)
248 {
249 #if defined(SCTP_LOCAL_TRACE_BUF)
250 	struct sctp_cwnd_log sctp_clog;
251 
252 	sctp_clog.x.mb.mp = m;
253 	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
254 	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
255 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
256 	if (SCTP_BUF_IS_EXTENDED(m)) {
257 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
258 		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
259 	} else {
260 		sctp_clog.x.mb.ext = 0;
261 		sctp_clog.x.mb.refcnt = 0;
262 	}
263 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
264 	    SCTP_LOG_EVENT_MBUF,
265 	    from,
266 	    sctp_clog.x.misc.log1,
267 	    sctp_clog.x.misc.log2,
268 	    sctp_clog.x.misc.log3,
269 	    sctp_clog.x.misc.log4);
270 #endif
271 }
272 
273 void
274 sctp_log_mbc(struct mbuf *m, int from)
275 {
276 	struct mbuf *mat;
277 
278 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
279 		sctp_log_mb(mat, from);
280 	}
281 }
282 #endif
283 
284 void
285 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
286 {
287 #if defined(SCTP_LOCAL_TRACE_BUF)
288 	struct sctp_cwnd_log sctp_clog;
289 
290 	if (control == NULL) {
291 		SCTP_PRINTF("Gak log of NULL?\n");
292 		return;
293 	}
294 	sctp_clog.x.strlog.stcb = control->stcb;
295 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
296 	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
297 	sctp_clog.x.strlog.strm = control->sinfo_stream;
298 	if (poschk != NULL) {
299 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
300 		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
301 	} else {
302 		sctp_clog.x.strlog.e_tsn = 0;
303 		sctp_clog.x.strlog.e_sseq = 0;
304 	}
305 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
306 	    SCTP_LOG_EVENT_STRM,
307 	    from,
308 	    sctp_clog.x.misc.log1,
309 	    sctp_clog.x.misc.log2,
310 	    sctp_clog.x.misc.log3,
311 	    sctp_clog.x.misc.log4);
312 #endif
313 }
314 
315 void
316 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
317 {
318 #if defined(SCTP_LOCAL_TRACE_BUF)
319 	struct sctp_cwnd_log sctp_clog;
320 
321 	sctp_clog.x.cwnd.net = net;
322 	if (stcb->asoc.send_queue_cnt > 255)
323 		sctp_clog.x.cwnd.cnt_in_send = 255;
324 	else
325 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
326 	if (stcb->asoc.stream_queue_cnt > 255)
327 		sctp_clog.x.cwnd.cnt_in_str = 255;
328 	else
329 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
330 
331 	if (net) {
332 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
333 		sctp_clog.x.cwnd.inflight = net->flight_size;
334 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
335 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
336 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
337 	}
338 	if (SCTP_CWNDLOG_PRESEND == from) {
339 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
340 	}
341 	sctp_clog.x.cwnd.cwnd_augment = augment;
342 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
343 	    SCTP_LOG_EVENT_CWND,
344 	    from,
345 	    sctp_clog.x.misc.log1,
346 	    sctp_clog.x.misc.log2,
347 	    sctp_clog.x.misc.log3,
348 	    sctp_clog.x.misc.log4);
349 #endif
350 }
351 
352 void
353 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
354 {
355 #if defined(SCTP_LOCAL_TRACE_BUF)
356 	struct sctp_cwnd_log sctp_clog;
357 
358 	memset(&sctp_clog, 0, sizeof(sctp_clog));
359 	if (inp) {
360 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
361 
362 	} else {
363 		sctp_clog.x.lock.sock = (void *)NULL;
364 	}
365 	sctp_clog.x.lock.inp = (void *)inp;
366 	if (stcb) {
367 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
368 	} else {
369 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
370 	}
371 	if (inp) {
372 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
373 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
374 	} else {
375 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
376 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
377 	}
378 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
379 	if (inp && (inp->sctp_socket)) {
380 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
381 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
382 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
383 	} else {
384 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
385 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
386 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
387 	}
388 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
389 	    SCTP_LOG_LOCK_EVENT,
390 	    from,
391 	    sctp_clog.x.misc.log1,
392 	    sctp_clog.x.misc.log2,
393 	    sctp_clog.x.misc.log3,
394 	    sctp_clog.x.misc.log4);
395 #endif
396 }
397 
398 void
399 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
400 {
401 #if defined(SCTP_LOCAL_TRACE_BUF)
402 	struct sctp_cwnd_log sctp_clog;
403 
404 	memset(&sctp_clog, 0, sizeof(sctp_clog));
405 	sctp_clog.x.cwnd.net = net;
406 	sctp_clog.x.cwnd.cwnd_new_value = error;
407 	sctp_clog.x.cwnd.inflight = net->flight_size;
408 	sctp_clog.x.cwnd.cwnd_augment = burst;
409 	if (stcb->asoc.send_queue_cnt > 255)
410 		sctp_clog.x.cwnd.cnt_in_send = 255;
411 	else
412 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
413 	if (stcb->asoc.stream_queue_cnt > 255)
414 		sctp_clog.x.cwnd.cnt_in_str = 255;
415 	else
416 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
417 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
418 	    SCTP_LOG_EVENT_MAXBURST,
419 	    from,
420 	    sctp_clog.x.misc.log1,
421 	    sctp_clog.x.misc.log2,
422 	    sctp_clog.x.misc.log3,
423 	    sctp_clog.x.misc.log4);
424 #endif
425 }
426 
427 void
428 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
429 {
430 #if defined(SCTP_LOCAL_TRACE_BUF)
431 	struct sctp_cwnd_log sctp_clog;
432 
433 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
434 	sctp_clog.x.rwnd.send_size = snd_size;
435 	sctp_clog.x.rwnd.overhead = overhead;
436 	sctp_clog.x.rwnd.new_rwnd = 0;
437 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
438 	    SCTP_LOG_EVENT_RWND,
439 	    from,
440 	    sctp_clog.x.misc.log1,
441 	    sctp_clog.x.misc.log2,
442 	    sctp_clog.x.misc.log3,
443 	    sctp_clog.x.misc.log4);
444 #endif
445 }
446 
447 void
448 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
449 {
450 #if defined(SCTP_LOCAL_TRACE_BUF)
451 	struct sctp_cwnd_log sctp_clog;
452 
453 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
454 	sctp_clog.x.rwnd.send_size = flight_size;
455 	sctp_clog.x.rwnd.overhead = overhead;
456 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
457 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
458 	    SCTP_LOG_EVENT_RWND,
459 	    from,
460 	    sctp_clog.x.misc.log1,
461 	    sctp_clog.x.misc.log2,
462 	    sctp_clog.x.misc.log3,
463 	    sctp_clog.x.misc.log4);
464 #endif
465 }
466 
467 #ifdef SCTP_MBCNT_LOGGING
468 static void
469 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
470 {
471 #if defined(SCTP_LOCAL_TRACE_BUF)
472 	struct sctp_cwnd_log sctp_clog;
473 
474 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
475 	sctp_clog.x.mbcnt.size_change = book;
476 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
477 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
478 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
479 	    SCTP_LOG_EVENT_MBCNT,
480 	    from,
481 	    sctp_clog.x.misc.log1,
482 	    sctp_clog.x.misc.log2,
483 	    sctp_clog.x.misc.log3,
484 	    sctp_clog.x.misc.log4);
485 #endif
486 }
487 #endif
488 
489 void
490 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
491 {
492 #if defined(SCTP_LOCAL_TRACE_BUF)
493 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
494 	    SCTP_LOG_MISC_EVENT,
495 	    from,
496 	    a, b, c, d);
497 #endif
498 }
499 
500 void
501 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
502 {
503 #if defined(SCTP_LOCAL_TRACE_BUF)
504 	struct sctp_cwnd_log sctp_clog;
505 
506 	sctp_clog.x.wake.stcb = (void *)stcb;
507 	sctp_clog.x.wake.wake_cnt = wake_cnt;
508 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
509 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
510 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
511 
512 	if (stcb->asoc.stream_queue_cnt < 0xff)
513 		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
514 	else
515 		sctp_clog.x.wake.stream_qcnt = 0xff;
516 
517 	if (stcb->asoc.chunks_on_out_queue < 0xff)
518 		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
519 	else
520 		sctp_clog.x.wake.chunks_on_oque = 0xff;
521 
522 	sctp_clog.x.wake.sctpflags = 0;
523 	/* set in the deferred mode stuff */
524 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
525 		sctp_clog.x.wake.sctpflags |= 1;
526 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
527 		sctp_clog.x.wake.sctpflags |= 2;
528 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
529 		sctp_clog.x.wake.sctpflags |= 4;
530 	/* what about the sb */
531 	if (stcb->sctp_socket) {
532 		struct socket *so = stcb->sctp_socket;
533 
534 		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
535 	} else {
536 		sctp_clog.x.wake.sbflags = 0xff;
537 	}
538 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
539 	    SCTP_LOG_EVENT_WAKE,
540 	    from,
541 	    sctp_clog.x.misc.log1,
542 	    sctp_clog.x.misc.log2,
543 	    sctp_clog.x.misc.log3,
544 	    sctp_clog.x.misc.log4);
545 #endif
546 }
547 
548 void
549 sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
550 {
551 #if defined(SCTP_LOCAL_TRACE_BUF)
552 	struct sctp_cwnd_log sctp_clog;
553 
554 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
555 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
556 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
557 	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
558 	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
559 	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
560 	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
561 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
562 	    SCTP_LOG_EVENT_BLOCK,
563 	    from,
564 	    sctp_clog.x.misc.log1,
565 	    sctp_clog.x.misc.log2,
566 	    sctp_clog.x.misc.log3,
567 	    sctp_clog.x.misc.log4);
568 #endif
569 }
570 
571 int
572 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
573 {
574 	/* May need to fix this if ktrdump does not work */
575 	return (0);
576 }
577 
578 #ifdef SCTP_AUDITING_ENABLED
579 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
580 static int sctp_audit_indx = 0;
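
/*
 * Note on the audit mechanism (a descriptive summary of the code below, not
 * new behaviour): sctp_audit_data is a simple ring buffer of two-byte
 * records, where byte 0 is an event code (e.g. 0xAA on entry to
 * sctp_auditing(), 0xAF when an inconsistency is detected) and byte 1
 * carries event-specific detail.  sctp_audit_indx wraps at SCTP_AUDIT_SIZE,
 * and sctp_print_audit_report() dumps the records oldest first.
 */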
581 
582 static
583 void
584 sctp_print_audit_report(void)
585 {
586 	int i;
587 	int cnt;
588 
589 	cnt = 0;
590 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
591 		if ((sctp_audit_data[i][0] == 0xe0) &&
592 		    (sctp_audit_data[i][1] == 0x01)) {
593 			cnt = 0;
594 			SCTP_PRINTF("\n");
595 		} else if (sctp_audit_data[i][0] == 0xf0) {
596 			cnt = 0;
597 			SCTP_PRINTF("\n");
598 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
599 		    (sctp_audit_data[i][1] == 0x01)) {
600 			SCTP_PRINTF("\n");
601 			cnt = 0;
602 		}
603 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
604 		    (uint32_t)sctp_audit_data[i][1]);
605 		cnt++;
606 		if ((cnt % 14) == 0)
607 			SCTP_PRINTF("\n");
608 	}
609 	for (i = 0; i < sctp_audit_indx; i++) {
610 		if ((sctp_audit_data[i][0] == 0xe0) &&
611 		    (sctp_audit_data[i][1] == 0x01)) {
612 			cnt = 0;
613 			SCTP_PRINTF("\n");
614 		} else if (sctp_audit_data[i][0] == 0xf0) {
615 			cnt = 0;
616 			SCTP_PRINTF("\n");
617 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
618 		    (sctp_audit_data[i][1] == 0x01)) {
619 			SCTP_PRINTF("\n");
620 			cnt = 0;
621 		}
622 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
623 		    (uint32_t)sctp_audit_data[i][1]);
624 		cnt++;
625 		if ((cnt % 14) == 0)
626 			SCTP_PRINTF("\n");
627 	}
628 	SCTP_PRINTF("\n");
629 }
630 
631 void
632 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
633     struct sctp_nets *net)
634 {
635 	int resend_cnt, tot_out, rep, tot_book_cnt;
636 	struct sctp_nets *lnet;
637 	struct sctp_tmit_chunk *chk;
638 
639 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
640 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
641 	sctp_audit_indx++;
642 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
643 		sctp_audit_indx = 0;
644 	}
645 	if (inp == NULL) {
646 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
647 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
648 		sctp_audit_indx++;
649 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
650 			sctp_audit_indx = 0;
651 		}
652 		return;
653 	}
654 	if (stcb == NULL) {
655 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
656 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
657 		sctp_audit_indx++;
658 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
659 			sctp_audit_indx = 0;
660 		}
661 		return;
662 	}
663 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
664 	sctp_audit_data[sctp_audit_indx][1] =
665 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
666 	sctp_audit_indx++;
667 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
668 		sctp_audit_indx = 0;
669 	}
670 	rep = 0;
671 	tot_book_cnt = 0;
672 	resend_cnt = tot_out = 0;
673 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
674 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
675 			resend_cnt++;
676 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
677 			tot_out += chk->book_size;
678 			tot_book_cnt++;
679 		}
680 	}
681 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
682 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
683 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
684 		sctp_audit_indx++;
685 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
686 			sctp_audit_indx = 0;
687 		}
688 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
689 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
690 		rep = 1;
691 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
692 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
693 		sctp_audit_data[sctp_audit_indx][1] =
694 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
695 		sctp_audit_indx++;
696 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
697 			sctp_audit_indx = 0;
698 		}
699 	}
700 	if (tot_out != stcb->asoc.total_flight) {
701 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
702 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
703 		sctp_audit_indx++;
704 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
705 			sctp_audit_indx = 0;
706 		}
707 		rep = 1;
708 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
709 		    (int)stcb->asoc.total_flight);
710 		stcb->asoc.total_flight = tot_out;
711 	}
712 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
713 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
714 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
715 		sctp_audit_indx++;
716 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
717 			sctp_audit_indx = 0;
718 		}
719 		rep = 1;
720 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
721 
722 		stcb->asoc.total_flight_count = tot_book_cnt;
723 	}
724 	tot_out = 0;
725 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
726 		tot_out += lnet->flight_size;
727 	}
728 	if (tot_out != stcb->asoc.total_flight) {
729 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
730 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
731 		sctp_audit_indx++;
732 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
733 			sctp_audit_indx = 0;
734 		}
735 		rep = 1;
736 		SCTP_PRINTF("real flight:%d net total was %d\n",
737 		    stcb->asoc.total_flight, tot_out);
738 		/* now corrective action */
739 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
740 
741 			tot_out = 0;
742 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
743 				if ((chk->whoTo == lnet) &&
744 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
745 					tot_out += chk->book_size;
746 				}
747 			}
748 			if (lnet->flight_size != tot_out) {
749 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
750 				    (void *)lnet, lnet->flight_size,
751 				    tot_out);
752 				lnet->flight_size = tot_out;
753 			}
754 		}
755 	}
756 	if (rep) {
757 		sctp_print_audit_report();
758 	}
759 }
760 
761 void
762 sctp_audit_log(uint8_t ev, uint8_t fd)
763 {
764 
765 	sctp_audit_data[sctp_audit_indx][0] = ev;
766 	sctp_audit_data[sctp_audit_indx][1] = fd;
767 	sctp_audit_indx++;
768 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
769 		sctp_audit_indx = 0;
770 	}
771 }
772 
773 #endif
774 
775 /*
776  * sctp_stop_timers_for_shutdown() should be called
777  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
778  * state to make sure that all timers are stopped.
779  */
780 void
781 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
782 {
783 	struct sctp_inpcb *inp;
784 	struct sctp_nets *net;
785 
786 	inp = stcb->sctp_ep;
787 
788 	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
789 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
790 	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
791 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
792 	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
793 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
794 	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
795 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
796 	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
797 		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
798 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
799 		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
800 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
801 	}
802 }
803 
804 void
805 sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
806 {
807 	struct sctp_inpcb *inp;
808 	struct sctp_nets *net;
809 
810 	inp = stcb->sctp_ep;
811 	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
812 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
813 	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
814 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
815 	if (stop_assoc_kill_timer) {
816 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
817 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
818 	}
819 	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
820 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
821 	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
822 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
823 	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
824 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
825 	/* Mobility adaptation */
826 	sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
827 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
828 	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
829 		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
830 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
831 		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
832 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
833 		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
834 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
835 		sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
836 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
837 		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
838 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
839 		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
840 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
841 		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
842 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
843 	}
844 }
845 
846 /*
847  * A list of sizes based on typical MTUs, used only if the next hop MTU is
848  * not returned. These values MUST be multiples of 4 and MUST be ordered.
849  */
850 static uint32_t sctp_mtu_sizes[] = {
851 	68,
852 	296,
853 	508,
854 	512,
855 	544,
856 	576,
857 	1004,
858 	1492,
859 	1500,
860 	1536,
861 	2000,
862 	2048,
863 	4352,
864 	4464,
865 	8168,
866 	17912,
867 	32000,
868 	65532
869 };
870 
871 /*
872  * Return the largest MTU in sctp_mtu_sizes smaller than val.
873  * If val is smaller than the minimum, just return the largest
874  * multiple of 4 that is smaller than or equal to val.
875  * Ensure that the result is a multiple of 4.
876  */
877 uint32_t
878 sctp_get_prev_mtu(uint32_t val)
879 {
880 	uint32_t i;
881 
882 	val &= 0xfffffffc;
883 	if (val <= sctp_mtu_sizes[0]) {
884 		return (val);
885 	}
886 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
887 		if (val <= sctp_mtu_sizes[i]) {
888 			break;
889 		}
890 	}
891 	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
892 	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
893 	return (sctp_mtu_sizes[i - 1]);
894 }
895 
896 /*
897  * Return the smallest MTU in sctp_mtu_sizes larger than val.
898  * If val is larger than the maximum, just return the largest multiple of 4
899  * that is smaller than or equal to val.
900  * Ensure that the result is a multiple of 4.
901  */
902 uint32_t
903 sctp_get_next_mtu(uint32_t val)
904 {
905 	/* select another MTU that is just bigger than this one */
906 	uint32_t i;
907 
908 	val &= 0xfffffffc;
909 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
910 		if (val < sctp_mtu_sizes[i]) {
911 			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
912 			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
913 			return (sctp_mtu_sizes[i]);
914 		}
915 	}
916 	return (val);
917 }
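
/*
 * Worked example (values derived from the sctp_mtu_sizes table above, not
 * from any external source): sctp_get_prev_mtu(1500) stops at
 * sctp_mtu_sizes[8] == 1500 and returns sctp_mtu_sizes[7] == 1492, the
 * largest entry strictly smaller than the 4-byte aligned input, while
 * sctp_get_next_mtu(1500) returns 1536, the first entry strictly larger.
 * sctp_get_next_mtu(65532) returns 65532 unchanged, since no larger entry
 * exists in the table.
 */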
918 
919 void
920 sctp_fill_random_store(struct sctp_pcb *m)
921 {
922 	/*
923 	 * Here we use MD5/SHA-1 to hash our good random numbers together with
924 	 * our counter. The result becomes our new set of good random numbers
925 	 * and we then set up to hand these out. Note that we do no locking to
926 	 * protect this. This is ok, since if competing callers get here we
927 	 * will just get more gobbledygook in the random store, which is what
928 	 * we want. There is a danger that two callers will use the same random
929 	 * numbers, but that is ok too since that is random as well :->
930 	 */
931 	m->store_at = 0;
932 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
933 	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
934 	    sizeof(m->random_counter), (uint8_t *)m->random_store);
935 	m->random_counter++;
936 }
937 
938 uint32_t
939 sctp_select_initial_TSN(struct sctp_pcb *inp)
940 {
941 	/*
942 	 * A true implementation should use a random selection process to get
943 	 * the initial stream sequence number, using RFC 1750 as a good
944 	 * guideline.
945 	 */
946 	uint32_t x, *xp;
947 	uint8_t *p;
948 	int store_at, new_store;
949 
950 	if (inp->initial_sequence_debug != 0) {
951 		uint32_t ret;
952 
953 		ret = inp->initial_sequence_debug;
954 		inp->initial_sequence_debug++;
955 		return (ret);
956 	}
957 retry:
958 	store_at = inp->store_at;
959 	new_store = store_at + sizeof(uint32_t);
960 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
961 		new_store = 0;
962 	}
963 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
964 		goto retry;
965 	}
966 	if (new_store == 0) {
967 		/* Refill the random store */
968 		sctp_fill_random_store(inp);
969 	}
970 	p = &inp->random_store[store_at];
971 	xp = (uint32_t *)p;
972 	x = *xp;
973 	return (x);
974 }
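
/*
 * Usage sketch of the store handling above (a restatement, not additional
 * behaviour): each call atomically advances store_at by four bytes via
 * atomic_cmpset_int() and returns those four bytes of random_store as the
 * TSN.  When the advance would run past the end of the store, store_at
 * wraps to 0 and sctp_fill_random_store() rehashes the pool, so concurrent
 * callers may retry the compare-and-set but never block.
 */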
975 
976 uint32_t
977 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
978 {
979 	uint32_t x;
980 	struct timeval now;
981 
982 	if (check) {
983 		(void)SCTP_GETTIME_TIMEVAL(&now);
984 	}
985 	for (;;) {
986 		x = sctp_select_initial_TSN(&inp->sctp_ep);
987 		if (x == 0) {
988 			/* we never use 0 */
989 			continue;
990 		}
991 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
992 			break;
993 		}
994 	}
995 	return (x);
996 }
997 
998 int32_t
999 sctp_map_assoc_state(int kernel_state)
1000 {
1001 	int32_t user_state;
1002 
1003 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
1004 		user_state = SCTP_CLOSED;
1005 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
1006 		user_state = SCTP_SHUTDOWN_PENDING;
1007 	} else {
1008 		switch (kernel_state & SCTP_STATE_MASK) {
1009 		case SCTP_STATE_EMPTY:
1010 			user_state = SCTP_CLOSED;
1011 			break;
1012 		case SCTP_STATE_INUSE:
1013 			user_state = SCTP_CLOSED;
1014 			break;
1015 		case SCTP_STATE_COOKIE_WAIT:
1016 			user_state = SCTP_COOKIE_WAIT;
1017 			break;
1018 		case SCTP_STATE_COOKIE_ECHOED:
1019 			user_state = SCTP_COOKIE_ECHOED;
1020 			break;
1021 		case SCTP_STATE_OPEN:
1022 			user_state = SCTP_ESTABLISHED;
1023 			break;
1024 		case SCTP_STATE_SHUTDOWN_SENT:
1025 			user_state = SCTP_SHUTDOWN_SENT;
1026 			break;
1027 		case SCTP_STATE_SHUTDOWN_RECEIVED:
1028 			user_state = SCTP_SHUTDOWN_RECEIVED;
1029 			break;
1030 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
1031 			user_state = SCTP_SHUTDOWN_ACK_SENT;
1032 			break;
1033 		default:
1034 			user_state = SCTP_CLOSED;
1035 			break;
1036 		}
1037 	}
1038 	return (user_state);
1039 }
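
/*
 * Example of the precedence implemented above (illustrative only): a kernel
 * state of (SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING) is reported to
 * user land as SCTP_SHUTDOWN_PENDING, while any state carrying
 * SCTP_STATE_WAS_ABORTED is reported as SCTP_CLOSED, regardless of the bits
 * covered by SCTP_STATE_MASK.
 */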
1040 
1041 int
1042 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1043     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
1044 {
1045 	struct sctp_association *asoc;
1046 
1047 	/*
1048 	 * Anything set to zero is taken care of by the allocation routine's
1049 	 * bzero
1050 	 */
1051 
1052 	/*
1053 	 * Up front select what scoping to apply on addresses I tell my peer.
1054 	 * Not sure what to do with these right now; we will need to come up
1055 	 * with a way to set them. We may need to pass them through from the
1056 	 * caller in the sctp_aloc_assoc() function.
1057 	 */
1058 	int i;
1059 #if defined(SCTP_DETAILED_STR_STATS)
1060 	int j;
1061 #endif
1062 
1063 	asoc = &stcb->asoc;
1064 	/* init all variables to a known value. */
1065 	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
1066 	asoc->max_burst = inp->sctp_ep.max_burst;
1067 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
1068 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
1069 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
1070 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
1071 	asoc->ecn_supported = inp->ecn_supported;
1072 	asoc->prsctp_supported = inp->prsctp_supported;
1073 	asoc->idata_supported = inp->idata_supported;
1074 	asoc->auth_supported = inp->auth_supported;
1075 	asoc->asconf_supported = inp->asconf_supported;
1076 	asoc->reconfig_supported = inp->reconfig_supported;
1077 	asoc->nrsack_supported = inp->nrsack_supported;
1078 	asoc->pktdrop_supported = inp->pktdrop_supported;
1079 	asoc->idata_supported = inp->idata_supported;
1080 	asoc->sctp_cmt_pf = (uint8_t)0;
1081 	asoc->sctp_frag_point = inp->sctp_frag_point;
1082 	asoc->sctp_features = inp->sctp_features;
1083 	asoc->default_dscp = inp->sctp_ep.default_dscp;
1084 	asoc->max_cwnd = inp->max_cwnd;
1085 #ifdef INET6
1086 	if (inp->sctp_ep.default_flowlabel) {
1087 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
1088 	} else {
1089 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
1090 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
1091 			asoc->default_flowlabel &= 0x000fffff;
1092 			asoc->default_flowlabel |= 0x80000000;
1093 		} else {
1094 			asoc->default_flowlabel = 0;
1095 		}
1096 	}
1097 #endif
1098 	asoc->sb_send_resv = 0;
1099 	if (override_tag) {
1100 		asoc->my_vtag = override_tag;
1101 	} else {
1102 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1103 	}
1104 	/* Get the nonce tags */
1105 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1106 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1107 	asoc->vrf_id = vrf_id;
1108 
1109 #ifdef SCTP_ASOCLOG_OF_TSNS
1110 	asoc->tsn_in_at = 0;
1111 	asoc->tsn_out_at = 0;
1112 	asoc->tsn_in_wrapped = 0;
1113 	asoc->tsn_out_wrapped = 0;
1114 	asoc->cumack_log_at = 0;
1115 	asoc->cumack_log_atsnt = 0;
1116 #endif
1117 #ifdef SCTP_FS_SPEC_LOG
1118 	asoc->fs_index = 0;
1119 #endif
1120 	asoc->refcnt = 0;
1121 	asoc->assoc_up_sent = 0;
1122 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1123 	    sctp_select_initial_TSN(&inp->sctp_ep);
1124 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1125 	/* we are optimistic here */
1126 	asoc->peer_supports_nat = 0;
1127 	asoc->sent_queue_retran_cnt = 0;
1128 
1129 	/* for CMT */
1130 	asoc->last_net_cmt_send_started = NULL;
1131 
1132 	/* This will need to be adjusted */
1133 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1134 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1135 	asoc->asconf_seq_in = asoc->last_acked_seq;
1136 
1137 	/* here we are different, we hold the next one we expect */
1138 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1139 
1140 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1141 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1142 
1143 	asoc->default_mtu = inp->sctp_ep.default_mtu;
1144 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1145 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1146 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1147 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1148 	asoc->free_chunk_cnt = 0;
1149 
1150 	asoc->iam_blocking = 0;
1151 	asoc->context = inp->sctp_context;
1152 	asoc->local_strreset_support = inp->local_strreset_support;
1153 	asoc->def_send = inp->def_send;
1154 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1155 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1156 	asoc->pr_sctp_cnt = 0;
1157 	asoc->total_output_queue_size = 0;
1158 
1159 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1160 		asoc->scope.ipv6_addr_legal = 1;
1161 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1162 			asoc->scope.ipv4_addr_legal = 1;
1163 		} else {
1164 			asoc->scope.ipv4_addr_legal = 0;
1165 		}
1166 	} else {
1167 		asoc->scope.ipv6_addr_legal = 0;
1168 		asoc->scope.ipv4_addr_legal = 1;
1169 	}
1170 
1171 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1172 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1173 
1174 	asoc->smallest_mtu = inp->sctp_frag_point;
1175 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1176 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1177 
1178 	asoc->stream_locked_on = 0;
1179 	asoc->ecn_echo_cnt_onq = 0;
1180 	asoc->stream_locked = 0;
1181 
1182 	asoc->send_sack = 1;
1183 
1184 	LIST_INIT(&asoc->sctp_restricted_addrs);
1185 
1186 	TAILQ_INIT(&asoc->nets);
1187 	TAILQ_INIT(&asoc->pending_reply_queue);
1188 	TAILQ_INIT(&asoc->asconf_ack_sent);
1189 	/* Setup to fill the hb random cache at first HB */
1190 	asoc->hb_random_idx = 4;
1191 
1192 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1193 
1194 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1195 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1196 
1197 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1198 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1199 
1200 	/*
1201 	 * Now the stream parameters, here we allocate space for all streams
1202 	 * that we request by default.
1203 	 */
1204 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1205 	    o_strms;
1206 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1207 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1208 	    SCTP_M_STRMO);
1209 	if (asoc->strmout == NULL) {
1210 		/* big trouble no memory */
1211 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1212 		return (ENOMEM);
1213 	}
1214 	for (i = 0; i < asoc->streamoutcnt; i++) {
1215 		/*
1216 		 * The inbound side must be set to 0xffff. Also NOTE: when we
1217 		 * get the INIT-ACK back (for the INIT sender) we MUST reduce
1218 		 * the count (streamoutcnt), but first check whether we sent on
1219 		 * any of the upper streams that were dropped (if any were).
1220 		 * Those that were dropped must be reported to the upper layer
1221 		 * as having failed to send.
1222 		 */
1223 		asoc->strmout[i].next_mid_ordered = 0;
1224 		asoc->strmout[i].next_mid_unordered = 0;
1225 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1226 		asoc->strmout[i].chunks_on_queues = 0;
1227 #if defined(SCTP_DETAILED_STR_STATS)
1228 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1229 			asoc->strmout[i].abandoned_sent[j] = 0;
1230 			asoc->strmout[i].abandoned_unsent[j] = 0;
1231 		}
1232 #else
1233 		asoc->strmout[i].abandoned_sent[0] = 0;
1234 		asoc->strmout[i].abandoned_unsent[0] = 0;
1235 #endif
1236 		asoc->strmout[i].sid = i;
1237 		asoc->strmout[i].last_msg_incomplete = 0;
1238 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1239 		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1240 	}
1241 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1242 
1243 	/* Now the mapping array */
1244 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1245 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1246 	    SCTP_M_MAP);
1247 	if (asoc->mapping_array == NULL) {
1248 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1249 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1250 		return (ENOMEM);
1251 	}
1252 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1253 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1254 	    SCTP_M_MAP);
1255 	if (asoc->nr_mapping_array == NULL) {
1256 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1257 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1258 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1259 		return (ENOMEM);
1260 	}
1261 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1262 
1263 	/* Now the init of the other outqueues */
1264 	TAILQ_INIT(&asoc->free_chunks);
1265 	TAILQ_INIT(&asoc->control_send_queue);
1266 	TAILQ_INIT(&asoc->asconf_send_queue);
1267 	TAILQ_INIT(&asoc->send_queue);
1268 	TAILQ_INIT(&asoc->sent_queue);
1269 	TAILQ_INIT(&asoc->resetHead);
1270 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1271 	TAILQ_INIT(&asoc->asconf_queue);
1272 	/* authentication fields */
1273 	asoc->authinfo.random = NULL;
1274 	asoc->authinfo.active_keyid = 0;
1275 	asoc->authinfo.assoc_key = NULL;
1276 	asoc->authinfo.assoc_keyid = 0;
1277 	asoc->authinfo.recv_key = NULL;
1278 	asoc->authinfo.recv_keyid = 0;
1279 	LIST_INIT(&asoc->shared_keys);
1280 	asoc->marked_retrans = 0;
1281 	asoc->port = inp->sctp_ep.port;
1282 	asoc->timoinit = 0;
1283 	asoc->timodata = 0;
1284 	asoc->timosack = 0;
1285 	asoc->timoshutdown = 0;
1286 	asoc->timoheartbeat = 0;
1287 	asoc->timocookie = 0;
1288 	asoc->timoshutdownack = 0;
1289 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1290 	asoc->discontinuity_time = asoc->start_time;
1291 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1292 		asoc->abandoned_unsent[i] = 0;
1293 		asoc->abandoned_sent[i] = 0;
1294 	}
1295 	/*
1296 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1297 	 * freed later when the association is freed.
1298 	 */
1299 	return (0);
1300 }
1301 
1302 void
1303 sctp_print_mapping_array(struct sctp_association *asoc)
1304 {
1305 	unsigned int i, limit;
1306 
1307 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1308 	    asoc->mapping_array_size,
1309 	    asoc->mapping_array_base_tsn,
1310 	    asoc->cumulative_tsn,
1311 	    asoc->highest_tsn_inside_map,
1312 	    asoc->highest_tsn_inside_nr_map);
1313 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1314 		if (asoc->mapping_array[limit - 1] != 0) {
1315 			break;
1316 		}
1317 	}
1318 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1319 	for (i = 0; i < limit; i++) {
1320 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1321 	}
1322 	if (limit % 16)
1323 		SCTP_PRINTF("\n");
1324 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1325 		if (asoc->nr_mapping_array[limit - 1]) {
1326 			break;
1327 		}
1328 	}
1329 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1330 	for (i = 0; i < limit; i++) {
1331 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1332 	}
1333 	if (limit % 16)
1334 		SCTP_PRINTF("\n");
1335 }
1336 
1337 int
1338 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1339 {
1340 	/* mapping array needs to grow */
1341 	uint8_t *new_array1, *new_array2;
1342 	uint32_t new_size;
1343 
1344 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1345 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1346 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1347 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1348 		/* can't get more, forget it */
1349 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1350 		if (new_array1) {
1351 			SCTP_FREE(new_array1, SCTP_M_MAP);
1352 		}
1353 		if (new_array2) {
1354 			SCTP_FREE(new_array2, SCTP_M_MAP);
1355 		}
1356 		return (-1);
1357 	}
1358 	memset(new_array1, 0, new_size);
1359 	memset(new_array2, 0, new_size);
1360 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1361 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1362 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1363 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1364 	asoc->mapping_array = new_array1;
1365 	asoc->nr_mapping_array = new_array2;
1366 	asoc->mapping_array_size = new_size;
1367 	return (0);
1368 }
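
/*
 * Sizing sketch for the expansion above (assuming SCTP_MAPPING_ARRAY_INCR is
 * a small constant number of bytes, e.g. 32): a request for 100 additional
 * TSN bits adds (100 + 7) / 8 = 13 bytes plus the increment, so a 16-byte
 * array would grow to 16 + 13 + 32 = 61 bytes, with both the renegable and
 * non-renegable maps reallocated and copied together.
 */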
1369 
1370 
1371 static void
1372 sctp_iterator_work(struct sctp_iterator *it)
1373 {
1374 	struct epoch_tracker et;
1375 	struct sctp_inpcb *tinp;
1376 	int iteration_count = 0;
1377 	int inp_skip = 0;
1378 	int first_in = 1;
1379 
1380 	NET_EPOCH_ENTER(et);
1381 	SCTP_INP_INFO_RLOCK();
1382 	SCTP_ITERATOR_LOCK();
1383 	sctp_it_ctl.cur_it = it;
1384 	if (it->inp) {
1385 		SCTP_INP_RLOCK(it->inp);
1386 		SCTP_INP_DECR_REF(it->inp);
1387 	}
1388 	if (it->inp == NULL) {
1389 		/* iterator is complete */
1390 done_with_iterator:
1391 		sctp_it_ctl.cur_it = NULL;
1392 		SCTP_ITERATOR_UNLOCK();
1393 		SCTP_INP_INFO_RUNLOCK();
1394 		if (it->function_atend != NULL) {
1395 			(*it->function_atend) (it->pointer, it->val);
1396 		}
1397 		SCTP_FREE(it, SCTP_M_ITER);
1398 		NET_EPOCH_EXIT(et);
1399 		return;
1400 	}
1401 select_a_new_ep:
1402 	if (first_in) {
1403 		first_in = 0;
1404 	} else {
1405 		SCTP_INP_RLOCK(it->inp);
1406 	}
1407 	while (((it->pcb_flags) &&
1408 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1409 	    ((it->pcb_features) &&
1410 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1411 		/* endpoint flags or features don't match, so keep looking */
1412 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1413 			SCTP_INP_RUNLOCK(it->inp);
1414 			goto done_with_iterator;
1415 		}
1416 		tinp = it->inp;
1417 		it->inp = LIST_NEXT(it->inp, sctp_list);
1418 		SCTP_INP_RUNLOCK(tinp);
1419 		if (it->inp == NULL) {
1420 			goto done_with_iterator;
1421 		}
1422 		SCTP_INP_RLOCK(it->inp);
1423 	}
1424 	/* now go through each assoc which is in the desired state */
1425 	if (it->done_current_ep == 0) {
1426 		if (it->function_inp != NULL)
1427 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1428 		it->done_current_ep = 1;
1429 	}
1430 	if (it->stcb == NULL) {
1431 		/* run the per instance function */
1432 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1433 	}
1434 	if ((inp_skip) || it->stcb == NULL) {
1435 		if (it->function_inp_end != NULL) {
1436 			inp_skip = (*it->function_inp_end) (it->inp,
1437 			    it->pointer,
1438 			    it->val);
1439 		}
1440 		SCTP_INP_RUNLOCK(it->inp);
1441 		goto no_stcb;
1442 	}
1443 	while (it->stcb) {
1444 		SCTP_TCB_LOCK(it->stcb);
1445 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1446 			/* not in the right state... keep looking */
1447 			SCTP_TCB_UNLOCK(it->stcb);
1448 			goto next_assoc;
1449 		}
1450 		/* see if we have limited out the iterator loop */
1451 		iteration_count++;
1452 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1453 			/* Pause to let others grab the lock */
1454 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1455 			SCTP_TCB_UNLOCK(it->stcb);
1456 			SCTP_INP_INCR_REF(it->inp);
1457 			SCTP_INP_RUNLOCK(it->inp);
1458 			SCTP_ITERATOR_UNLOCK();
1459 			SCTP_INP_INFO_RUNLOCK();
1460 			SCTP_INP_INFO_RLOCK();
1461 			SCTP_ITERATOR_LOCK();
1462 			if (sctp_it_ctl.iterator_flags) {
1463 				/* We won't be staying here */
1464 				SCTP_INP_DECR_REF(it->inp);
1465 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1466 				if (sctp_it_ctl.iterator_flags &
1467 				    SCTP_ITERATOR_STOP_CUR_IT) {
1468 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1469 					goto done_with_iterator;
1470 				}
1471 				if (sctp_it_ctl.iterator_flags &
1472 				    SCTP_ITERATOR_STOP_CUR_INP) {
1473 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1474 					goto no_stcb;
1475 				}
1476 				/* If we reach here huh? */
1477 				SCTP_PRINTF("Unknown it ctl flag %x\n",
1478 				    sctp_it_ctl.iterator_flags);
1479 				sctp_it_ctl.iterator_flags = 0;
1480 			}
1481 			SCTP_INP_RLOCK(it->inp);
1482 			SCTP_INP_DECR_REF(it->inp);
1483 			SCTP_TCB_LOCK(it->stcb);
1484 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1485 			iteration_count = 0;
1486 		}
1487 
1488 		/* run function on this one */
1489 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1490 
1491 		/*
1492 		 * we lie here, it really needs to have its own type but
1493 		 * first I must verify that this won't affect things :-0
1494 		 */
1495 		if (it->no_chunk_output == 0)
1496 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1497 
1498 		SCTP_TCB_UNLOCK(it->stcb);
1499 next_assoc:
1500 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1501 		if (it->stcb == NULL) {
1502 			/* Run last function */
1503 			if (it->function_inp_end != NULL) {
1504 				inp_skip = (*it->function_inp_end) (it->inp,
1505 				    it->pointer,
1506 				    it->val);
1507 			}
1508 		}
1509 	}
1510 	SCTP_INP_RUNLOCK(it->inp);
1511 no_stcb:
1512 	/* done with all assocs on this endpoint, move on to next endpoint */
1513 	it->done_current_ep = 0;
1514 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1515 		it->inp = NULL;
1516 	} else {
1517 		it->inp = LIST_NEXT(it->inp, sctp_list);
1518 	}
1519 	if (it->inp == NULL) {
1520 		goto done_with_iterator;
1521 	}
1522 	goto select_a_new_ep;
1523 }
1524 
1525 void
1526 sctp_iterator_worker(void)
1527 {
1528 	struct sctp_iterator *it;
1529 
1530 	/* This function is called with the WQ lock in place */
1531 	sctp_it_ctl.iterator_running = 1;
1532 	while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
1533 		/* now lets work on this one */
1534 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1535 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1536 		CURVNET_SET(it->vn);
1537 		sctp_iterator_work(it);
1538 		CURVNET_RESTORE();
1539 		SCTP_IPI_ITERATOR_WQ_LOCK();
1540 		/* sa_ignore FREED_MEMORY */
1541 	}
1542 	sctp_it_ctl.iterator_running = 0;
1543 	return;
1544 }
1545 
1546 
1547 static void
1548 sctp_handle_addr_wq(void)
1549 {
1550 	/* deal with the ADDR wq from the rtsock calls */
1551 	struct sctp_laddr *wi, *nwi;
1552 	struct sctp_asconf_iterator *asc;
1553 
1554 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1555 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1556 	if (asc == NULL) {
1557 		/* Try later, no memory */
1558 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1559 		    (struct sctp_inpcb *)NULL,
1560 		    (struct sctp_tcb *)NULL,
1561 		    (struct sctp_nets *)NULL);
1562 		return;
1563 	}
1564 	LIST_INIT(&asc->list_of_work);
1565 	asc->cnt = 0;
1566 
1567 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1568 		LIST_REMOVE(wi, sctp_nxt_addr);
1569 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1570 		asc->cnt++;
1571 	}
1572 
1573 	if (asc->cnt == 0) {
1574 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1575 	} else {
1576 		int ret;
1577 
1578 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1579 		    sctp_asconf_iterator_stcb,
1580 		    NULL,	/* No ep end for boundall */
1581 		    SCTP_PCB_FLAGS_BOUNDALL,
1582 		    SCTP_PCB_ANY_FEATURES,
1583 		    SCTP_ASOC_ANY_STATE,
1584 		    (void *)asc, 0,
1585 		    sctp_asconf_iterator_end, NULL, 0);
1586 		if (ret) {
1587 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1588 			/*
1589 			 * Free it if we are stopping, otherwise put it back
1590 			 * on the addr_wq.
1591 			 */
1592 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1593 				sctp_asconf_iterator_end(asc, 0);
1594 			} else {
1595 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1596 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1597 				}
1598 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1599 			}
1600 		}
1601 	}
1602 }
1603 
1604 void
1605 sctp_timeout_handler(void *t)
1606 {
1607 	struct epoch_tracker et;
1608 	struct sctp_inpcb *inp;
1609 	struct sctp_tcb *stcb;
1610 	struct sctp_nets *net;
1611 	struct sctp_timer *tmr;
1612 	struct mbuf *op_err;
1613 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1614 	struct socket *so;
1615 #endif
1616 	int did_output;
1617 	int type;
1618 
1619 	tmr = (struct sctp_timer *)t;
1620 	inp = (struct sctp_inpcb *)tmr->ep;
1621 	stcb = (struct sctp_tcb *)tmr->tcb;
1622 	net = (struct sctp_nets *)tmr->net;
1623 	CURVNET_SET((struct vnet *)tmr->vnet);
1624 	did_output = 1;
1625 
1626 #ifdef SCTP_AUDITING_ENABLED
1627 	sctp_audit_log(0xF0, (uint8_t)tmr->type);
1628 	sctp_auditing(3, inp, stcb, net);
1629 #endif
1630 
1631 	/* sanity checks... */
1632 	if (tmr->self != (void *)tmr) {
1633 		/*
1634 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1635 		 * (void *)tmr);
1636 		 */
1637 		CURVNET_RESTORE();
1638 		return;
1639 	}
1640 	tmr->stopped_from = 0xa001;
1641 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1642 		/*
1643 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1644 		 * tmr->type);
1645 		 */
1646 		CURVNET_RESTORE();
1647 		return;
1648 	}
1649 	tmr->stopped_from = 0xa002;
1650 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1651 		CURVNET_RESTORE();
1652 		return;
1653 	}
1654 	/* if this is an iterator timeout, get the struct and clear inp */
1655 	tmr->stopped_from = 0xa003;
1656 	if (inp) {
1657 		SCTP_INP_INCR_REF(inp);
1658 		if ((inp->sctp_socket == NULL) &&
1659 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1660 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1661 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1662 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1663 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1664 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1665 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1666 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1667 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))) {
1668 			SCTP_INP_DECR_REF(inp);
1669 			CURVNET_RESTORE();
1670 			return;
1671 		}
1672 	}
1673 	tmr->stopped_from = 0xa004;
1674 	if (stcb) {
1675 		atomic_add_int(&stcb->asoc.refcnt, 1);
1676 		if (stcb->asoc.state == 0) {
1677 			atomic_add_int(&stcb->asoc.refcnt, -1);
1678 			if (inp) {
1679 				SCTP_INP_DECR_REF(inp);
1680 			}
1681 			CURVNET_RESTORE();
1682 			return;
1683 		}
1684 	}
1685 	type = tmr->type;
1686 	tmr->stopped_from = 0xa005;
1687 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
1688 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1689 		if (inp) {
1690 			SCTP_INP_DECR_REF(inp);
1691 		}
1692 		if (stcb) {
1693 			atomic_add_int(&stcb->asoc.refcnt, -1);
1694 		}
1695 		CURVNET_RESTORE();
1696 		return;
1697 	}
1698 	tmr->stopped_from = 0xa006;
1699 
1700 	if (stcb) {
1701 		SCTP_TCB_LOCK(stcb);
1702 		atomic_add_int(&stcb->asoc.refcnt, -1);
1703 		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1704 		    ((stcb->asoc.state == 0) ||
1705 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1706 			SCTP_TCB_UNLOCK(stcb);
1707 			if (inp) {
1708 				SCTP_INP_DECR_REF(inp);
1709 			}
1710 			CURVNET_RESTORE();
1711 			return;
1712 		}
1713 	} else if (inp != NULL) {
1714 		if (type != SCTP_TIMER_TYPE_INPKILL) {
1715 			SCTP_INP_WLOCK(inp);
1716 		}
1717 	} else {
1718 		SCTP_WQ_ADDR_LOCK();
1719 	}
1720 	/* record in stopped what t-o occurred */
1721 	tmr->stopped_from = type;
1722 
1723 	NET_EPOCH_ENTER(et);
1724 	/* mark as being serviced now */
1725 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1726 		/*
1727 		 * Callout has been rescheduled.
1728 		 */
1729 		goto get_out;
1730 	}
1731 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1732 		/*
1733 		 * Not active, so no action.
1734 		 */
1735 		goto get_out;
1736 	}
1737 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1738 
1739 	/* call the handler for the appropriate timer type */
1740 	switch (type) {
1741 	case SCTP_TIMER_TYPE_SEND:
1742 		if ((stcb == NULL) || (inp == NULL)) {
1743 			break;
1744 		}
1745 		SCTP_STAT_INCR(sctps_timodata);
1746 		stcb->asoc.timodata++;
1747 		stcb->asoc.num_send_timers_up--;
1748 		if (stcb->asoc.num_send_timers_up < 0) {
1749 			stcb->asoc.num_send_timers_up = 0;
1750 		}
1751 		SCTP_TCB_LOCK_ASSERT(stcb);
1752 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1753 			/* no need to unlock the tcb, it's gone */
1754 
1755 			goto out_decr;
1756 		}
1757 		SCTP_TCB_LOCK_ASSERT(stcb);
1758 #ifdef SCTP_AUDITING_ENABLED
1759 		sctp_auditing(4, inp, stcb, net);
1760 #endif
1761 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1762 		if ((stcb->asoc.num_send_timers_up == 0) &&
1763 		    (stcb->asoc.sent_queue_cnt > 0)) {
1764 			struct sctp_tmit_chunk *chk;
1765 
1766 			/*
1767 			 * Safeguard: if there is something on the sent queue
1768 			 * but no timers are running, something is wrong, so
1769 			 * start a timer on the first chunk of the sent queue,
1770 			 * on whatever net it was sent to.
1771 			 */
1772 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1773 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1774 			    chk->whoTo);
1775 		}
1776 		break;
1777 	case SCTP_TIMER_TYPE_INIT:
1778 		if ((stcb == NULL) || (inp == NULL)) {
1779 			break;
1780 		}
1781 		SCTP_STAT_INCR(sctps_timoinit);
1782 		stcb->asoc.timoinit++;
1783 		if (sctp_t1init_timer(inp, stcb, net)) {
1784 			/* no need to unlock the tcb, it's gone */
1785 			goto out_decr;
1786 		}
1787 		/* We do output but not here */
1788 		did_output = 0;
1789 		break;
1790 	case SCTP_TIMER_TYPE_RECV:
1791 		if ((stcb == NULL) || (inp == NULL)) {
1792 			break;
1793 		}
1794 		SCTP_STAT_INCR(sctps_timosack);
1795 		stcb->asoc.timosack++;
1796 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1797 #ifdef SCTP_AUDITING_ENABLED
1798 		sctp_auditing(4, inp, stcb, net);
1799 #endif
1800 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1801 		break;
1802 	case SCTP_TIMER_TYPE_SHUTDOWN:
1803 		if ((stcb == NULL) || (inp == NULL)) {
1804 			break;
1805 		}
1806 		if (sctp_shutdown_timer(inp, stcb, net)) {
1807 			/* no need to unlock the tcb, it's gone */
1808 			goto out_decr;
1809 		}
1810 		SCTP_STAT_INCR(sctps_timoshutdown);
1811 		stcb->asoc.timoshutdown++;
1812 #ifdef SCTP_AUDITING_ENABLED
1813 		sctp_auditing(4, inp, stcb, net);
1814 #endif
1815 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1816 		break;
1817 	case SCTP_TIMER_TYPE_HEARTBEAT:
1818 		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1819 			break;
1820 		}
1821 		SCTP_STAT_INCR(sctps_timoheartbeat);
1822 		stcb->asoc.timoheartbeat++;
1823 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1824 			/* no need to unlock the tcb, it's gone */
1825 			goto out_decr;
1826 		}
1827 #ifdef SCTP_AUDITING_ENABLED
1828 		sctp_auditing(4, inp, stcb, net);
1829 #endif
1830 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1831 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1832 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1833 		}
1834 		break;
1835 	case SCTP_TIMER_TYPE_COOKIE:
1836 		if ((stcb == NULL) || (inp == NULL)) {
1837 			break;
1838 		}
1839 
1840 		if (sctp_cookie_timer(inp, stcb, net)) {
1841 			/* no need to unlock the tcb, it's gone */
1842 			goto out_decr;
1843 		}
1844 		SCTP_STAT_INCR(sctps_timocookie);
1845 		stcb->asoc.timocookie++;
1846 #ifdef SCTP_AUDITING_ENABLED
1847 		sctp_auditing(4, inp, stcb, net);
1848 #endif
1849 		/*
1850 		 * We consider T3 and Cookie timer pretty much the same with
1851 		 * respect to where from in chunk_output.
1852 		 */
1853 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1854 		break;
1855 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1856 		{
1857 			struct timeval tv;
1858 			int i, secret;
1859 
1860 			if (inp == NULL) {
1861 				break;
1862 			}
1863 			SCTP_STAT_INCR(sctps_timosecret);
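			/*
			 * Rotate the secret used to sign state cookies: the
			 * previous secret number is remembered so recently
			 * issued cookies can still be validated, the new
			 * current slot is refilled with fresh pseudo-random
			 * words, and the timer is re-armed.
			 */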
1864 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1865 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1866 			inp->sctp_ep.last_secret_number =
1867 			    inp->sctp_ep.current_secret_number;
1868 			inp->sctp_ep.current_secret_number++;
1869 			if (inp->sctp_ep.current_secret_number >=
1870 			    SCTP_HOW_MANY_SECRETS) {
1871 				inp->sctp_ep.current_secret_number = 0;
1872 			}
1873 			secret = (int)inp->sctp_ep.current_secret_number;
1874 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1875 				inp->sctp_ep.secret_key[secret][i] =
1876 				    sctp_select_initial_TSN(&inp->sctp_ep);
1877 			}
1878 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);
1879 		}
1880 		did_output = 0;
1881 		break;
1882 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1883 		if ((stcb == NULL) || (inp == NULL)) {
1884 			break;
1885 		}
1886 		SCTP_STAT_INCR(sctps_timopathmtu);
1887 		sctp_pathmtu_timer(inp, stcb, net);
1888 		did_output = 0;
1889 		break;
1890 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1891 		if ((stcb == NULL) || (inp == NULL)) {
1892 			break;
1893 		}
1894 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1895 			/* no need to unlock the tcb, it's gone */
1896 			goto out_decr;
1897 		}
1898 		SCTP_STAT_INCR(sctps_timoshutdownack);
1899 		stcb->asoc.timoshutdownack++;
1900 #ifdef SCTP_AUDITING_ENABLED
1901 		sctp_auditing(4, inp, stcb, net);
1902 #endif
1903 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1904 		break;
1905 	case SCTP_TIMER_TYPE_ASCONF:
1906 		if ((stcb == NULL) || (inp == NULL)) {
1907 			break;
1908 		}
1909 		if (sctp_asconf_timer(inp, stcb, net)) {
1910 			/* no need to unlock the tcb, it's gone */
1911 			goto out_decr;
1912 		}
1913 		SCTP_STAT_INCR(sctps_timoasconf);
1914 #ifdef SCTP_AUDITING_ENABLED
1915 		sctp_auditing(4, inp, stcb, net);
1916 #endif
1917 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1918 		break;
1919 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1920 		if ((stcb == NULL) || (inp == NULL)) {
1921 			break;
1922 		}
1923 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1924 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1925 		    "Shutdown guard timer expired");
1926 		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
1927 		/* no need to unlock the tcb, it's gone */
1928 		goto out_decr;
1929 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1930 		if ((stcb == NULL) || (inp == NULL)) {
1931 			break;
1932 		}
1933 		SCTP_STAT_INCR(sctps_timoautoclose);
1934 		sctp_autoclose_timer(inp, stcb, net);
1935 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1936 		did_output = 0;
1937 		break;
1938 	case SCTP_TIMER_TYPE_STRRESET:
1939 		if ((stcb == NULL) || (inp == NULL)) {
1940 			break;
1941 		}
1942 		if (sctp_strreset_timer(inp, stcb)) {
1943 			/* no need to unlock the tcb, it's gone */
1944 			goto out_decr;
1945 		}
1946 		SCTP_STAT_INCR(sctps_timostrmrst);
1947 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1948 		break;
1949 	case SCTP_TIMER_TYPE_INPKILL:
1950 		SCTP_STAT_INCR(sctps_timoinpkill);
1951 		if (inp == NULL) {
1952 			break;
1953 		}
1954 		/*
1955 		 * special case, take away our increment since WE are the
1956 		 * killer
1957 		 */
1958 		SCTP_INP_DECR_REF(inp);
1959 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
1960 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1961 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1962 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1963 		inp = NULL;
1964 		goto out_no_decr;
1965 	case SCTP_TIMER_TYPE_ASOCKILL:
1966 		if ((stcb == NULL) || (inp == NULL)) {
1967 			break;
1968 		}
1969 		SCTP_STAT_INCR(sctps_timoassockill);
1970 		/* Can we free it yet? */
1971 		SCTP_INP_DECR_REF(inp);
1972 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
1973 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1974 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1975 		so = SCTP_INP_SO(inp);
1976 		atomic_add_int(&stcb->asoc.refcnt, 1);
1977 		SCTP_TCB_UNLOCK(stcb);
1978 		SCTP_SOCKET_LOCK(so, 1);
1979 		SCTP_TCB_LOCK(stcb);
1980 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1981 #endif
1982 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1983 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1984 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1985 		SCTP_SOCKET_UNLOCK(so, 1);
1986 #endif
1987 		/*
1988 		 * sctp_free_assoc() always unlocks (or destroys) the lock, so
1989 		 * prevent a duplicate unlock or an unlock of a freed mutex.
1990 		 */
1991 		stcb = NULL;
1992 		goto out_no_decr;
1993 	case SCTP_TIMER_TYPE_ADDR_WQ:
1994 		sctp_handle_addr_wq();
1995 		break;
1996 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1997 		if ((stcb == NULL) || (inp == NULL)) {
1998 			break;
1999 		}
2000 		sctp_delete_prim_timer(inp, stcb, net);
2001 		SCTP_STAT_INCR(sctps_timodelprim);
2002 		break;
2003 	default:
2004 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
2005 		    type);
2006 		break;
2007 	}
2008 #ifdef SCTP_AUDITING_ENABLED
2009 	sctp_audit_log(0xF1, (uint8_t)type);
2010 	if (inp)
2011 		sctp_auditing(5, inp, stcb, net);
2012 #endif
2013 	if ((did_output) && stcb) {
2014 		/*
2015 		 * Now we need to clean up the control chunk chain if an
2016 		 * ECNE is on it. It must be marked as UNSENT again so next
2017 		 * call will continue to send it until we get a CWR that
2018 		 * removes it. It is, however, unlikely that we will find an
2019 		 * ECN echo on the chain.
2020 		 */
2021 		sctp_fix_ecn_echo(&stcb->asoc);
2022 	}
2023 get_out:
2024 	if (stcb) {
2025 		SCTP_TCB_UNLOCK(stcb);
2026 	} else if (inp != NULL) {
2027 		SCTP_INP_WUNLOCK(inp);
2028 	} else {
2029 		SCTP_WQ_ADDR_UNLOCK();
2030 	}
2031 
2032 out_decr:
2033 	if (inp) {
2034 		SCTP_INP_DECR_REF(inp);
2035 	}
2036 
2037 out_no_decr:
2038 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
2039 	CURVNET_RESTORE();
2040 	NET_EPOCH_EXIT(et);
2041 }
2042 
2043 void
2044 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2045     struct sctp_nets *net)
2046 {
2047 	uint32_t to_ticks;
2048 	struct sctp_timer *tmr;
2049 
2050 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
2051 		return;
2052 
2053 	tmr = NULL;
2054 	if (stcb) {
2055 		SCTP_TCB_LOCK_ASSERT(stcb);
2056 	}
2057 	/* Don't restart timer on net that's been removed. */
2058 	if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) {
2059 		return;
2060 	}
2061 	switch (t_type) {
2062 	case SCTP_TIMER_TYPE_SEND:
2063 		/* Here we use the RTO timer */
2064 		{
2065 			int rto_val;
2066 
2067 			if ((stcb == NULL) || (net == NULL)) {
2068 				return;
2069 			}
2070 			tmr = &net->rxt_timer;
2071 			if (net->RTO == 0) {
2072 				rto_val = stcb->asoc.initial_rto;
2073 			} else {
2074 				rto_val = net->RTO;
2075 			}
2076 			to_ticks = MSEC_TO_TICKS(rto_val);
2077 		}
2078 		break;
2079 	case SCTP_TIMER_TYPE_INIT:
2080 		/*
2081 		 * Here we use the INIT timer default usually about 1
2082 		 * minute.
2083 		 */
2084 		if ((stcb == NULL) || (net == NULL)) {
2085 			return;
2086 		}
2087 		tmr = &net->rxt_timer;
2088 		if (net->RTO == 0) {
2089 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2090 		} else {
2091 			to_ticks = MSEC_TO_TICKS(net->RTO);
2092 		}
2093 		break;
2094 	case SCTP_TIMER_TYPE_RECV:
2095 		/*
2096 		 * Here we use the Delayed-Ack timer value from the inp,
2097 		 * usually about 200ms.
2098 		 */
2099 		if (stcb == NULL) {
2100 			return;
2101 		}
2102 		tmr = &stcb->asoc.dack_timer;
2103 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2104 		break;
2105 	case SCTP_TIMER_TYPE_SHUTDOWN:
2106 		/* Here we use the RTO of the destination. */
2107 		if ((stcb == NULL) || (net == NULL)) {
2108 			return;
2109 		}
2110 		if (net->RTO == 0) {
2111 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2112 		} else {
2113 			to_ticks = MSEC_TO_TICKS(net->RTO);
2114 		}
2115 		tmr = &net->rxt_timer;
2116 		break;
2117 	case SCTP_TIMER_TYPE_HEARTBEAT:
2118 		/*
2119 		 * The net is needed here so that we can add in its RTO, even
2120 		 * though a different timer is used. On top of that we add the
2121 		 * HB delay plus a random jitter.
2122 		 */
2123 		if ((stcb == NULL) || (net == NULL)) {
2124 			return;
2125 		} else {
2126 			uint32_t rndval;
2127 			uint32_t jitter;
2128 
2129 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2130 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2131 				return;
2132 			}
2133 			if (net->RTO == 0) {
2134 				to_ticks = stcb->asoc.initial_rto;
2135 			} else {
2136 				to_ticks = net->RTO;
2137 			}
2138 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2139 			jitter = rndval % to_ticks;
2140 			if (jitter >= (to_ticks >> 1)) {
2141 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2142 			} else {
2143 				to_ticks = to_ticks - jitter;
2144 			}
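			/*
			 * E.g. with a base of 1000 ms: jitter 700 gives
			 * 1200 ms, jitter 300 gives 700 ms, so the jittered
			 * value stays roughly within [base / 2, 3 * base / 2).
			 */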
2145 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2146 			    !(net->dest_state & SCTP_ADDR_PF)) {
2147 				to_ticks += net->heart_beat_delay;
2148 			}
2149 			/*
2150 			 * to_ticks currently holds milliseconds; convert it
2151 			 * to ticks.
2152 			 */
2153 			to_ticks = MSEC_TO_TICKS(to_ticks);
2154 			tmr = &net->hb_timer;
2155 		}
2156 		break;
2157 	case SCTP_TIMER_TYPE_COOKIE:
2158 		/*
2159 		 * Here we can use the RTO of the network, since one RTT
2160 		 * measurement was completed. If a retransmission happened,
2161 		 * the initial RTO value is used instead.
2162 		 */
2163 		if ((stcb == NULL) || (net == NULL)) {
2164 			return;
2165 		}
2166 		if (net->RTO == 0) {
2167 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2168 		} else {
2169 			to_ticks = MSEC_TO_TICKS(net->RTO);
2170 		}
2171 		tmr = &net->rxt_timer;
2172 		break;
2173 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2174 		/*
2175 		 * Nothing needed but the endpoint here, usually about 60
2176 		 * minutes.
2177 		 */
2178 		tmr = &inp->sctp_ep.signature_change;
2179 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2180 		break;
2181 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2182 		/*
2183 		 * Here we use the value found in the EP for PMTU, usually
2184 		 * about 10 minutes.
2185 		 */
2186 		if ((stcb == NULL) || (net == NULL)) {
2187 			return;
2188 		}
2189 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2190 			return;
2191 		}
2192 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2193 		tmr = &net->pmtu_timer;
2194 		break;
2195 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2196 		/* Here we use the RTO of the destination */
2197 		if ((stcb == NULL) || (net == NULL)) {
2198 			return;
2199 		}
2200 		if (net->RTO == 0) {
2201 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2202 		} else {
2203 			to_ticks = MSEC_TO_TICKS(net->RTO);
2204 		}
2205 		tmr = &net->rxt_timer;
2206 		break;
2207 	case SCTP_TIMER_TYPE_ASCONF:
2208 		/*
2209 		 * Here the timer comes from the stcb but its value is from
2210 		 * the net's RTO.
2211 		 */
2212 		if ((stcb == NULL) || (net == NULL)) {
2213 			return;
2214 		}
2215 		if (net->RTO == 0) {
2216 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2217 		} else {
2218 			to_ticks = MSEC_TO_TICKS(net->RTO);
2219 		}
2220 		tmr = &stcb->asoc.asconf_timer;
2221 		break;
2222 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2223 		/*
2224 		 * Here we use the endpoint's shutdown guard timer, usually
2225 		 * about 3 minutes.
2226 		 */
2227 		if (stcb == NULL) {
2228 			return;
2229 		}
2230 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2231 			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2232 		} else {
2233 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2234 		}
2235 		tmr = &stcb->asoc.shut_guard_timer;
2236 		break;
2237 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2238 		if (stcb == NULL) {
2239 			return;
2240 		}
2241 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2242 			/*
2243 			 * Really an error since stcb is NOT set to
2244 			 * autoclose
2245 			 */
2246 			return;
2247 		}
2248 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2249 		tmr = &stcb->asoc.autoclose_timer;
2250 		break;
2251 	case SCTP_TIMER_TYPE_STRRESET:
2252 		/*
2253 		 * Here the timer comes from the stcb but its value is from
2254 		 * the net's RTO.
2255 		 */
2256 		if ((stcb == NULL) || (net == NULL)) {
2257 			return;
2258 		}
2259 		if (net->RTO == 0) {
2260 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2261 		} else {
2262 			to_ticks = MSEC_TO_TICKS(net->RTO);
2263 		}
2264 		tmr = &stcb->asoc.strreset_timer;
2265 		break;
2266 	case SCTP_TIMER_TYPE_INPKILL:
2267 		/*
2268 		 * The inp is set up to die. We re-use the signature_change
2269 		 * timer since that has stopped and we are in the GONE
2270 		 * state.
2271 		 */
2272 		tmr = &inp->sctp_ep.signature_change;
2273 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2274 		break;
2275 	case SCTP_TIMER_TYPE_ASOCKILL:
2276 		if (stcb == NULL) {
2277 			return;
2278 		}
2279 		tmr = &stcb->asoc.strreset_timer;
2280 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2281 		break;
2282 	case SCTP_TIMER_TYPE_ADDR_WQ:
2283 		/* Only 1 tick away :-) */
2284 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2285 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
2286 		break;
2287 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2288 		if ((stcb == NULL) || (net != NULL)) {
2289 			return;
2290 		}
2291 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2292 		tmr = &stcb->asoc.delete_prim_timer;
2293 		break;
2294 	default:
2295 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2296 		    __func__, t_type);
2297 		return;
2298 		break;
2299 	}
2300 	if ((to_ticks <= 0) || (tmr == NULL)) {
2301 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2302 		    __func__, t_type, to_ticks, (void *)tmr);
2303 		return;
2304 	}
2305 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2306 		/*
2307 		 * We do NOT allow a timer to be started while it is already
2308 		 * running; in that case the current one is left up unchanged.
2309 		 */
2310 		return;
2311 	}
2312 	/* At this point we can proceed */
2313 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2314 		stcb->asoc.num_send_timers_up++;
2315 	}
2316 	tmr->stopped_from = 0;
2317 	tmr->type = t_type;
2318 	tmr->ep = (void *)inp;
2319 	tmr->tcb = (void *)stcb;
2320 	tmr->net = (void *)net;
2321 	tmr->self = (void *)tmr;
2322 	tmr->vnet = (void *)curvnet;
2323 	tmr->ticks = sctp_get_tick_count();
2324 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2325 	return;
2326 }
2327 
2328 void
2329 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2330     struct sctp_nets *net, uint32_t from)
2331 {
2332 	struct sctp_timer *tmr;
2333 
2334 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2335 	    (inp == NULL))
2336 		return;
2337 
2338 	tmr = NULL;
2339 	if (stcb) {
2340 		SCTP_TCB_LOCK_ASSERT(stcb);
2341 	}
2342 	switch (t_type) {
2343 	case SCTP_TIMER_TYPE_SEND:
2344 		if ((stcb == NULL) || (net == NULL)) {
2345 			return;
2346 		}
2347 		tmr = &net->rxt_timer;
2348 		break;
2349 	case SCTP_TIMER_TYPE_INIT:
2350 		if ((stcb == NULL) || (net == NULL)) {
2351 			return;
2352 		}
2353 		tmr = &net->rxt_timer;
2354 		break;
2355 	case SCTP_TIMER_TYPE_RECV:
2356 		if (stcb == NULL) {
2357 			return;
2358 		}
2359 		tmr = &stcb->asoc.dack_timer;
2360 		break;
2361 	case SCTP_TIMER_TYPE_SHUTDOWN:
2362 		if ((stcb == NULL) || (net == NULL)) {
2363 			return;
2364 		}
2365 		tmr = &net->rxt_timer;
2366 		break;
2367 	case SCTP_TIMER_TYPE_HEARTBEAT:
2368 		if ((stcb == NULL) || (net == NULL)) {
2369 			return;
2370 		}
2371 		tmr = &net->hb_timer;
2372 		break;
2373 	case SCTP_TIMER_TYPE_COOKIE:
2374 		if ((stcb == NULL) || (net == NULL)) {
2375 			return;
2376 		}
2377 		tmr = &net->rxt_timer;
2378 		break;
2379 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2380 		/* nothing needed but the endpoint here */
2381 		tmr = &inp->sctp_ep.signature_change;
2382 		/*
2383 		 * We re-use the newcookie timer for the INP kill timer. We
2384 		 * must ensure that we do not kill it by accident.
2385 		 */
2386 		break;
2387 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2388 		if ((stcb == NULL) || (net == NULL)) {
2389 			return;
2390 		}
2391 		tmr = &net->pmtu_timer;
2392 		break;
2393 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2394 		if ((stcb == NULL) || (net == NULL)) {
2395 			return;
2396 		}
2397 		tmr = &net->rxt_timer;
2398 		break;
2399 	case SCTP_TIMER_TYPE_ASCONF:
2400 		if (stcb == NULL) {
2401 			return;
2402 		}
2403 		tmr = &stcb->asoc.asconf_timer;
2404 		break;
2405 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2406 		if (stcb == NULL) {
2407 			return;
2408 		}
2409 		tmr = &stcb->asoc.shut_guard_timer;
2410 		break;
2411 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2412 		if (stcb == NULL) {
2413 			return;
2414 		}
2415 		tmr = &stcb->asoc.autoclose_timer;
2416 		break;
2417 	case SCTP_TIMER_TYPE_STRRESET:
2418 		if (stcb == NULL) {
2419 			return;
2420 		}
2421 		tmr = &stcb->asoc.strreset_timer;
2422 		break;
2423 	case SCTP_TIMER_TYPE_INPKILL:
2424 		/*
2425 	 * The inp is set up to die. We re-use the signature_change
2426 		 * timer since that has stopped and we are in the GONE
2427 		 * state.
2428 		 */
2429 		tmr = &inp->sctp_ep.signature_change;
2430 		break;
2431 	case SCTP_TIMER_TYPE_ASOCKILL:
2432 		/*
2433 		 * Stop the asoc kill timer.
2434 		 */
2435 		if (stcb == NULL) {
2436 			return;
2437 		}
2438 		tmr = &stcb->asoc.strreset_timer;
2439 		break;
2440 	case SCTP_TIMER_TYPE_ADDR_WQ:
2441 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2442 		break;
2443 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2444 		if (stcb == NULL) {
2445 			return;
2446 		}
2447 		tmr = &stcb->asoc.delete_prim_timer;
2448 		break;
2449 	default:
2450 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2451 		    __func__, t_type);
2452 		break;
2453 	}
2454 	if (tmr == NULL) {
2455 		return;
2456 	}
2457 	if ((tmr->type != t_type) && tmr->type) {
2458 		/*
2459 		 * OK, we have a timer that is under joint use, e.g. the
2460 		 * cookie timer sharing state with the SEND timer. The timer
2461 		 * the caller wants stopped is therefore not the one that is
2462 		 * running, so just return.
2463 		 */
2464 		return;
2465 	}
2466 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2467 		stcb->asoc.num_send_timers_up--;
2468 		if (stcb->asoc.num_send_timers_up < 0) {
2469 			stcb->asoc.num_send_timers_up = 0;
2470 		}
2471 	}
2472 	tmr->self = NULL;
2473 	tmr->stopped_from = from;
2474 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2475 	return;
2476 }
2477 
2478 uint32_t
2479 sctp_calculate_len(struct mbuf *m)
2480 {
2481 	uint32_t tlen = 0;
2482 	struct mbuf *at;
2483 
2484 	at = m;
2485 	while (at) {
2486 		tlen += SCTP_BUF_LEN(at);
2487 		at = SCTP_BUF_NEXT(at);
2488 	}
2489 	return (tlen);
2490 }
2491 
2492 void
2493 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2494     struct sctp_association *asoc, uint32_t mtu)
2495 {
2496 	/*
2497 	 * Reset the P-MTU size on this association. This involves changing
2498 	 * the asoc MTU and marking ANY queued chunk whose size plus overhead
2499 	 * exceeds the new MTU as fragmentable, so the DF flag can be cleared.
2500 	 */
2501 	struct sctp_tmit_chunk *chk;
2502 	unsigned int eff_mtu, ovh;
2503 
2504 	asoc->smallest_mtu = mtu;
2505 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2506 		ovh = SCTP_MIN_OVERHEAD;
2507 	} else {
2508 		ovh = SCTP_MIN_V4_OVERHEAD;
2509 	}
2510 	eff_mtu = mtu - ovh;
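	/*
	 * eff_mtu is the payload space left after the minimal IP plus SCTP
	 * common header overhead; any queued chunk larger than that must be
	 * allowed to be fragmented (i.e. sent with DF cleared).
	 */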
2511 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2512 		if (chk->send_size > eff_mtu) {
2513 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2514 		}
2515 	}
2516 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2517 		if (chk->send_size > eff_mtu) {
2518 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2519 		}
2520 	}
2521 }
2522 
2523 
2524 /*
2525  * Given an association and starting time of the current RTT period, update
2526  * RTO in number of msecs. net should point to the current network.
2527  * Return 1 if an RTO update was performed, or 0 if no update was
2528  * performed due to an invalid starting point.
2529  */
2530 
2531 int
2532 sctp_calculate_rto(struct sctp_tcb *stcb,
2533     struct sctp_association *asoc,
2534     struct sctp_nets *net,
2535     struct timeval *old,
2536     int rtt_from_sack)
2537 {
2538 	struct timeval now;
2539 	uint64_t rtt_us;	/* RTT in us */
2540 	int32_t rtt;		/* RTT in ms */
2541 	uint32_t new_rto;
2542 	int first_measure = 0;
2543 
2544 	/************************/
2545 	/* 1. calculate new RTT */
2546 	/************************/
2547 	/* get the current time */
2548 	if (stcb->asoc.use_precise_time) {
2549 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2550 	} else {
2551 		(void)SCTP_GETTIME_TIMEVAL(&now);
2552 	}
2553 	if ((old->tv_sec > now.tv_sec) ||
2554 	    ((old->tv_sec == now.tv_sec) && (old->tv_usec > now.tv_usec))) {
2555 		/* The starting point is in the future. */
2556 		return (0);
2557 	}
2558 	timevalsub(&now, old);
2559 	rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec;
2560 	if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) {
2561 		/* The RTT is larger than a sane value. */
2562 		return (0);
2563 	}
2564 	/* store the current RTT in us */
2565 	net->rtt = rtt_us;
2566 	/* compute rtt in ms */
2567 	rtt = (int32_t)(net->rtt / 1000);
2568 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2569 		/*
2570 		 * Tell the CC module that a new update has just occurred
2571 		 * from a sack
2572 		 */
2573 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2574 	}
2575 	/*
2576 	 * Do we need to determine the LAN type? We do this only for SACKs,
2577 	 * i.e. RTTs determined from data, not from non-data (HB/INIT->INITACK).
2578 	 */
2579 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2580 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2581 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2582 			net->lan_type = SCTP_LAN_INTERNET;
2583 		} else {
2584 			net->lan_type = SCTP_LAN_LOCAL;
2585 		}
2586 	}
2587 
2588 	/***************************/
2589 	/* 2. update RTTVAR & SRTT */
2590 	/***************************/
2591 	/*-
2592 	 * Compute the scaled average lastsa and the
2593 	 * scaled variance lastsv as described in Van Jacobson's
2594 	 * paper "Congestion Avoidance and Control", Annex A.
2595 	 *
2596 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2597 	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
2598 	 */
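	/*
	 * As a sketch (RFC 4960, Section 6.3.1), assuming the usual shift
	 * values SCTP_RTT_SHIFT = 3 and SCTP_RTT_VAR_SHIFT = 2:
	 *   RTTVAR = 3/4 * RTTVAR + 1/4 * |SRTT - R'|
	 *   SRTT   = 7/8 * SRTT   + 1/8 * R'
	 *   RTO    = SRTT + 4 * RTTVAR
	 * The shifts implement the 1/8 and 1/4 gains in fixed point, and
	 * new_rto below works out to (lastsa >> 3) + lastsv, i.e.
	 * SRTT + 4 * RTTVAR.
	 */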
2599 	if (net->RTO_measured) {
2600 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2601 		net->lastsa += rtt;
2602 		if (rtt < 0) {
2603 			rtt = -rtt;
2604 		}
2605 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2606 		net->lastsv += rtt;
2607 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2608 			rto_logging(net, SCTP_LOG_RTTVAR);
2609 		}
2610 	} else {
2611 		/* First RTO measurement */
2612 		net->RTO_measured = 1;
2613 		first_measure = 1;
2614 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2615 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2616 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2617 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2618 		}
2619 	}
2620 	if (net->lastsv == 0) {
2621 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2622 	}
2623 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2624 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2625 	    (stcb->asoc.sat_network_lockout == 0)) {
2626 		stcb->asoc.sat_network = 1;
2627 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2628 		stcb->asoc.sat_network = 0;
2629 		stcb->asoc.sat_network_lockout = 1;
2630 	}
2631 	/* bound it, per C6/C7 in Section 5.3.1 */
2632 	if (new_rto < stcb->asoc.minrto) {
2633 		new_rto = stcb->asoc.minrto;
2634 	}
2635 	if (new_rto > stcb->asoc.maxrto) {
2636 		new_rto = stcb->asoc.maxrto;
2637 	}
2638 	net->RTO = new_rto;
2639 	return (1);
2640 }
2641 
2642 /*
2643  * return a pointer to a contiguous piece of data from the given mbuf chain
2644  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2645  * one mbuf, a copy is made into 'in_ptr'. The caller must ensure that the
2646  * buffer size is >= 'len'. Returns NULL if there aren't 'len' bytes in the chain.
2647  */
2648 caddr_t
2649 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
2650 {
2651 	uint32_t count;
2652 	uint8_t *ptr;
2653 
2654 	ptr = in_ptr;
2655 	if ((off < 0) || (len <= 0))
2656 		return (NULL);
2657 
2658 	/* find the desired start location */
2659 	while ((m != NULL) && (off > 0)) {
2660 		if (off < SCTP_BUF_LEN(m))
2661 			break;
2662 		off -= SCTP_BUF_LEN(m);
2663 		m = SCTP_BUF_NEXT(m);
2664 	}
2665 	if (m == NULL)
2666 		return (NULL);
2667 
2668 	/* is the current mbuf large enough (i.e. contiguous)? */
2669 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2670 		return (mtod(m, caddr_t)+off);
2671 	} else {
2672 		/* else, it spans more than one mbuf, so save a temp copy... */
2673 		while ((m != NULL) && (len > 0)) {
2674 			count = min(SCTP_BUF_LEN(m) - off, len);
2675 			memcpy(ptr, mtod(m, caddr_t)+off, count);
2676 			len -= count;
2677 			ptr += count;
2678 			off = 0;
2679 			m = SCTP_BUF_NEXT(m);
2680 		}
2681 		if ((m == NULL) && (len > 0))
2682 			return (NULL);
2683 		else
2684 			return ((caddr_t)in_ptr);
2685 	}
2686 }
2687 
2688 
2689 
2690 struct sctp_paramhdr *
2691 sctp_get_next_param(struct mbuf *m,
2692     int offset,
2693     struct sctp_paramhdr *pull,
2694     int pull_limit)
2695 {
2696 	/* This just provides a typed signature to Peter's Pull routine */
2697 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2698 	    (uint8_t *)pull));
2699 }
2700 
2701 
2702 struct mbuf *
2703 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2704 {
2705 	struct mbuf *m_last;
2706 	caddr_t dp;
2707 
2708 	if (padlen > 3) {
2709 		return (NULL);
2710 	}
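	/*
	 * SCTP chunks are padded to a 4-byte boundary (RFC 4960, Section
	 * 3.2), so at most 3 pad bytes are ever needed; e.g. a 37-byte
	 * chunk is passed in with padlen 3.
	 */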
2711 	if (padlen <= M_TRAILINGSPACE(m)) {
2712 		/*
2713 		 * The easy way. We hope the majority of the time we hit
2714 		 * here :)
2715 		 */
2716 		m_last = m;
2717 	} else {
2718 		/* Hard way we must grow the mbuf chain */
2719 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2720 		if (m_last == NULL) {
2721 			return (NULL);
2722 		}
2723 		SCTP_BUF_LEN(m_last) = 0;
2724 		SCTP_BUF_NEXT(m_last) = NULL;
2725 		SCTP_BUF_NEXT(m) = m_last;
2726 	}
2727 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2728 	SCTP_BUF_LEN(m_last) += padlen;
2729 	memset(dp, 0, padlen);
2730 	return (m_last);
2731 }
2732 
2733 struct mbuf *
2734 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2735 {
2736 	/* find the last mbuf in chain and pad it */
2737 	struct mbuf *m_at;
2738 
2739 	if (last_mbuf != NULL) {
2740 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2741 	} else {
2742 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2743 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2744 				return (sctp_add_pad_tombuf(m_at, padval));
2745 			}
2746 		}
2747 	}
2748 	return (NULL);
2749 }
2750 
2751 static void
2752 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2753     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2754 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2755     SCTP_UNUSED
2756 #endif
2757 )
2758 {
2759 	struct mbuf *m_notify;
2760 	struct sctp_assoc_change *sac;
2761 	struct sctp_queued_to_read *control;
2762 	unsigned int notif_len;
2763 	uint16_t abort_len;
2764 	unsigned int i;
2765 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2766 	struct socket *so;
2767 #endif
2768 
2769 	if (stcb == NULL) {
2770 		return;
2771 	}
2772 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2773 		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2774 		if (abort != NULL) {
2775 			abort_len = ntohs(abort->ch.chunk_length);
2776 			/*
2777 			 * Only SCTP_CHUNK_BUFFER_SIZE bytes are guaranteed
2778 			 * to be contiguous.
2779 			 */
2780 			if (abort_len > SCTP_CHUNK_BUFFER_SIZE) {
2781 				abort_len = SCTP_CHUNK_BUFFER_SIZE;
2782 			}
2783 		} else {
2784 			abort_len = 0;
2785 		}
2786 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2787 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2788 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2789 			notif_len += abort_len;
2790 		}
2791 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2792 		if (m_notify == NULL) {
2793 			/* Retry with smaller value. */
2794 			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2795 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2796 			if (m_notify == NULL) {
2797 				goto set_error;
2798 			}
2799 		}
2800 		SCTP_BUF_NEXT(m_notify) = NULL;
2801 		sac = mtod(m_notify, struct sctp_assoc_change *);
2802 		memset(sac, 0, notif_len);
2803 		sac->sac_type = SCTP_ASSOC_CHANGE;
2804 		sac->sac_flags = 0;
2805 		sac->sac_length = sizeof(struct sctp_assoc_change);
2806 		sac->sac_state = state;
2807 		sac->sac_error = error;
2808 		/* XXX verify these stream counts */
2809 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2810 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2811 		sac->sac_assoc_id = sctp_get_associd(stcb);
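		/*
		 * For COMM_UP/RESTART each feature supported on the
		 * association appends one identifier byte to sac_info[] and
		 * sac_length grows by one; for COMM_LOST/CANT_STR_ASSOC the
		 * received ABORT chunk (if any) is copied in instead.
		 */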
2812 		if (notif_len > sizeof(struct sctp_assoc_change)) {
2813 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2814 				i = 0;
2815 				if (stcb->asoc.prsctp_supported == 1) {
2816 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2817 				}
2818 				if (stcb->asoc.auth_supported == 1) {
2819 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2820 				}
2821 				if (stcb->asoc.asconf_supported == 1) {
2822 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2823 				}
2824 				if (stcb->asoc.idata_supported == 1) {
2825 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
2826 				}
2827 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2828 				if (stcb->asoc.reconfig_supported == 1) {
2829 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2830 				}
2831 				sac->sac_length += i;
2832 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2833 				memcpy(sac->sac_info, abort, abort_len);
2834 				sac->sac_length += abort_len;
2835 			}
2836 		}
2837 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2838 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2839 		    0, 0, stcb->asoc.context, 0, 0, 0,
2840 		    m_notify);
2841 		if (control != NULL) {
2842 			control->length = SCTP_BUF_LEN(m_notify);
2843 			control->spec_flags = M_NOTIFICATION;
2844 			/* not that we need this */
2845 			control->tail_mbuf = m_notify;
2846 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2847 			    control,
2848 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2849 			    so_locked);
2850 		} else {
2851 			sctp_m_freem(m_notify);
2852 		}
2853 	}
2854 	/*
2855 	 * For 1-to-1 style sockets, we send up an error when an ABORT
2856 	 * comes in.
2857 	 */
2858 set_error:
2859 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2860 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2861 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2862 		SOCK_LOCK(stcb->sctp_socket);
2863 		if (from_peer) {
2864 			if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
2865 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2866 				stcb->sctp_socket->so_error = ECONNREFUSED;
2867 			} else {
2868 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2869 				stcb->sctp_socket->so_error = ECONNRESET;
2870 			}
2871 		} else {
2872 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
2873 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
2874 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
2875 				stcb->sctp_socket->so_error = ETIMEDOUT;
2876 			} else {
2877 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2878 				stcb->sctp_socket->so_error = ECONNABORTED;
2879 			}
2880 		}
2881 		SOCK_UNLOCK(stcb->sctp_socket);
2882 	}
2883 	/* Wake ANY sleepers */
2884 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2885 	so = SCTP_INP_SO(stcb->sctp_ep);
2886 	if (!so_locked) {
2887 		atomic_add_int(&stcb->asoc.refcnt, 1);
2888 		SCTP_TCB_UNLOCK(stcb);
2889 		SCTP_SOCKET_LOCK(so, 1);
2890 		SCTP_TCB_LOCK(stcb);
2891 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2892 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2893 			SCTP_SOCKET_UNLOCK(so, 1);
2894 			return;
2895 		}
2896 	}
2897 #endif
2898 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2899 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2900 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2901 		socantrcvmore(stcb->sctp_socket);
2902 	}
2903 	sorwakeup(stcb->sctp_socket);
2904 	sowwakeup(stcb->sctp_socket);
2905 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2906 	if (!so_locked) {
2907 		SCTP_SOCKET_UNLOCK(so, 1);
2908 	}
2909 #endif
2910 }
2911 
2912 static void
2913 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2914     struct sockaddr *sa, uint32_t error, int so_locked
2915 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2916     SCTP_UNUSED
2917 #endif
2918 )
2919 {
2920 	struct mbuf *m_notify;
2921 	struct sctp_paddr_change *spc;
2922 	struct sctp_queued_to_read *control;
2923 
2924 	if ((stcb == NULL) ||
2925 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2926 		/* event not enabled */
2927 		return;
2928 	}
2929 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2930 	if (m_notify == NULL)
2931 		return;
2932 	SCTP_BUF_LEN(m_notify) = 0;
2933 	spc = mtod(m_notify, struct sctp_paddr_change *);
2934 	memset(spc, 0, sizeof(struct sctp_paddr_change));
2935 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2936 	spc->spc_flags = 0;
2937 	spc->spc_length = sizeof(struct sctp_paddr_change);
2938 	switch (sa->sa_family) {
2939 #ifdef INET
2940 	case AF_INET:
2941 #ifdef INET6
2942 		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
2943 			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
2944 			    (struct sockaddr_in6 *)&spc->spc_aaddr);
2945 		} else {
2946 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2947 		}
2948 #else
2949 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2950 #endif
2951 		break;
2952 #endif
2953 #ifdef INET6
2954 	case AF_INET6:
2955 		{
2956 			struct sockaddr_in6 *sin6;
2957 
2958 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2959 
2960 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2961 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2962 				if (sin6->sin6_scope_id == 0) {
2963 					/* recover scope_id for user */
2964 					(void)sa6_recoverscope(sin6);
2965 				} else {
2966 					/* clear embedded scope_id for user */
2967 					in6_clearscope(&sin6->sin6_addr);
2968 				}
2969 			}
2970 			break;
2971 		}
2972 #endif
2973 	default:
2974 		/* TSNH */
2975 		break;
2976 	}
2977 	spc->spc_state = state;
2978 	spc->spc_error = error;
2979 	spc->spc_assoc_id = sctp_get_associd(stcb);
2980 
2981 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2982 	SCTP_BUF_NEXT(m_notify) = NULL;
2983 
2984 	/* append to socket */
2985 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2986 	    0, 0, stcb->asoc.context, 0, 0, 0,
2987 	    m_notify);
2988 	if (control == NULL) {
2989 		/* no memory */
2990 		sctp_m_freem(m_notify);
2991 		return;
2992 	}
2993 	control->length = SCTP_BUF_LEN(m_notify);
2994 	control->spec_flags = M_NOTIFICATION;
2995 	/* not that we need this */
2996 	control->tail_mbuf = m_notify;
2997 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2998 	    control,
2999 	    &stcb->sctp_socket->so_rcv, 1,
3000 	    SCTP_READ_LOCK_NOT_HELD,
3001 	    so_locked);
3002 }
3003 
3004 
3005 static void
3006 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
3007     struct sctp_tmit_chunk *chk, int so_locked
3008 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3009     SCTP_UNUSED
3010 #endif
3011 )
3012 {
3013 	struct mbuf *m_notify;
3014 	struct sctp_send_failed *ssf;
3015 	struct sctp_send_failed_event *ssfe;
3016 	struct sctp_queued_to_read *control;
3017 	struct sctp_chunkhdr *chkhdr;
3018 	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;
3019 
3020 	if ((stcb == NULL) ||
3021 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3022 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3023 		/* event not enabled */
3024 		return;
3025 	}
3026 
3027 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3028 		notifhdr_len = sizeof(struct sctp_send_failed_event);
3029 	} else {
3030 		notifhdr_len = sizeof(struct sctp_send_failed);
3031 	}
3032 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3033 	if (m_notify == NULL)
3034 		/* no space left */
3035 		return;
3036 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3037 	if (stcb->asoc.idata_supported) {
3038 		chkhdr_len = sizeof(struct sctp_idata_chunk);
3039 	} else {
3040 		chkhdr_len = sizeof(struct sctp_data_chunk);
3041 	}
3042 	/* Use some defaults in case we can't access the chunk header */
3043 	if (chk->send_size >= chkhdr_len) {
3044 		payload_len = chk->send_size - chkhdr_len;
3045 	} else {
3046 		payload_len = 0;
3047 	}
3048 	padding_len = 0;
3049 	if (chk->data != NULL) {
3050 		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
3051 		if (chkhdr != NULL) {
3052 			chk_len = ntohs(chkhdr->chunk_length);
3053 			if ((chk_len >= chkhdr_len) &&
3054 			    (chk->send_size >= chk_len) &&
3055 			    (chk->send_size - chk_len < 4)) {
3056 				padding_len = chk->send_size - chk_len;
3057 				payload_len = chk->send_size - chkhdr_len - padding_len;
3058 			}
3059 		}
3060 	}
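	/*
	 * E.g., assuming the usual 16-byte DATA chunk header: a chunk
	 * carrying 21 user bytes has chunk_length 37 and a padded
	 * send_size of 40, giving padding_len 3 and payload_len 21.
	 */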
3061 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3062 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3063 		memset(ssfe, 0, notifhdr_len);
3064 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3065 		if (sent) {
3066 			ssfe->ssfe_flags = SCTP_DATA_SENT;
3067 		} else {
3068 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3069 		}
3070 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
3071 		ssfe->ssfe_error = error;
3072 		/* not exactly what the user sent in, but should be close :) */
3073 		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
3074 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
3075 		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
3076 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
3077 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3078 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3079 	} else {
3080 		ssf = mtod(m_notify, struct sctp_send_failed *);
3081 		memset(ssf, 0, notifhdr_len);
3082 		ssf->ssf_type = SCTP_SEND_FAILED;
3083 		if (sent) {
3084 			ssf->ssf_flags = SCTP_DATA_SENT;
3085 		} else {
3086 			ssf->ssf_flags = SCTP_DATA_UNSENT;
3087 		}
3088 		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
3089 		ssf->ssf_error = error;
3090 		/* not exactly what the user sent in, but should be close :) */
3091 		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
3092 		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
3093 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
3094 		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
3095 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
3096 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3097 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3098 	}
3099 	if (chk->data != NULL) {
3100 		/* Trim off the sctp chunk header (it should be there) */
3101 		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
3102 			m_adj(chk->data, chkhdr_len);
3103 			m_adj(chk->data, -padding_len);
3104 			sctp_mbuf_crush(chk->data);
3105 			chk->send_size -= (chkhdr_len + padding_len);
3106 		}
3107 	}
3108 	SCTP_BUF_NEXT(m_notify) = chk->data;
3109 	/* Steal off the mbuf */
3110 	chk->data = NULL;
3111 	/*
3112 	 * For this case, we check the actual socket buffer: since the assoc
3113 	 * is going away, we don't want to overfill the socket buffer for a
3114 	 * non-reader.
3115 	 */
3116 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3117 		sctp_m_freem(m_notify);
3118 		return;
3119 	}
3120 	/* append to socket */
3121 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3122 	    0, 0, stcb->asoc.context, 0, 0, 0,
3123 	    m_notify);
3124 	if (control == NULL) {
3125 		/* no memory */
3126 		sctp_m_freem(m_notify);
3127 		return;
3128 	}
3129 	control->length = SCTP_BUF_LEN(m_notify);
3130 	control->spec_flags = M_NOTIFICATION;
3131 	/* not that we need this */
3132 	control->tail_mbuf = m_notify;
3133 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3134 	    control,
3135 	    &stcb->sctp_socket->so_rcv, 1,
3136 	    SCTP_READ_LOCK_NOT_HELD,
3137 	    so_locked);
3138 }
3139 
3140 
3141 static void
3142 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3143     struct sctp_stream_queue_pending *sp, int so_locked
3144 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3145     SCTP_UNUSED
3146 #endif
3147 )
3148 {
3149 	struct mbuf *m_notify;
3150 	struct sctp_send_failed *ssf;
3151 	struct sctp_send_failed_event *ssfe;
3152 	struct sctp_queued_to_read *control;
3153 	int notifhdr_len;
3154 
3155 	if ((stcb == NULL) ||
3156 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3157 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3158 		/* event not enabled */
3159 		return;
3160 	}
3161 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3162 		notifhdr_len = sizeof(struct sctp_send_failed_event);
3163 	} else {
3164 		notifhdr_len = sizeof(struct sctp_send_failed);
3165 	}
3166 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3167 	if (m_notify == NULL) {
3168 		/* no space left */
3169 		return;
3170 	}
3171 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3172 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3173 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3174 		memset(ssfe, 0, notifhdr_len);
3175 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3176 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3177 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
3178 		ssfe->ssfe_error = error;
3179 		/* not exactly what the user sent in, but should be close :) */
3180 		ssfe->ssfe_info.snd_sid = sp->sid;
3181 		if (sp->some_taken) {
3182 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3183 		} else {
3184 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3185 		}
3186 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3187 		ssfe->ssfe_info.snd_context = sp->context;
3188 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3189 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3190 	} else {
3191 		ssf = mtod(m_notify, struct sctp_send_failed *);
3192 		memset(ssf, 0, notifhdr_len);
3193 		ssf->ssf_type = SCTP_SEND_FAILED;
3194 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3195 		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
3196 		ssf->ssf_error = error;
3197 		/* not exactly what the user sent in, but should be close :) */
3198 		ssf->ssf_info.sinfo_stream = sp->sid;
3199 		ssf->ssf_info.sinfo_ssn = 0;
3200 		if (sp->some_taken) {
3201 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3202 		} else {
3203 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3204 		}
3205 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3206 		ssf->ssf_info.sinfo_context = sp->context;
3207 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3208 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3209 	}
3210 	SCTP_BUF_NEXT(m_notify) = sp->data;
3211 
3212 	/* Steal off the mbuf */
3213 	sp->data = NULL;
3214 	/*
3215 	 * For this case, we check the actual socket buffer: since the assoc
3216 	 * is going away, we don't want to overfill the socket buffer for a
3217 	 * non-reader.
3218 	 */
3219 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3220 		sctp_m_freem(m_notify);
3221 		return;
3222 	}
3223 	/* append to socket */
3224 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3225 	    0, 0, stcb->asoc.context, 0, 0, 0,
3226 	    m_notify);
3227 	if (control == NULL) {
3228 		/* no memory */
3229 		sctp_m_freem(m_notify);
3230 		return;
3231 	}
3232 	control->length = SCTP_BUF_LEN(m_notify);
3233 	control->spec_flags = M_NOTIFICATION;
3234 	/* not that we need this */
3235 	control->tail_mbuf = m_notify;
3236 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3237 	    control,
3238 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3239 }
3240 
3241 
3242 
3243 static void
3244 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3245 {
3246 	struct mbuf *m_notify;
3247 	struct sctp_adaptation_event *sai;
3248 	struct sctp_queued_to_read *control;
3249 
3250 	if ((stcb == NULL) ||
3251 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3252 		/* event not enabled */
3253 		return;
3254 	}
3255 
3256 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaptation_event), 0, M_NOWAIT, 1, MT_DATA);
3257 	if (m_notify == NULL)
3258 		/* no space left */
3259 		return;
3260 	SCTP_BUF_LEN(m_notify) = 0;
3261 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3262 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3263 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3264 	sai->sai_flags = 0;
3265 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3266 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3267 	sai->sai_assoc_id = sctp_get_associd(stcb);
3268 
3269 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3270 	SCTP_BUF_NEXT(m_notify) = NULL;
3271 
3272 	/* append to socket */
3273 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3274 	    0, 0, stcb->asoc.context, 0, 0, 0,
3275 	    m_notify);
3276 	if (control == NULL) {
3277 		/* no memory */
3278 		sctp_m_freem(m_notify);
3279 		return;
3280 	}
3281 	control->length = SCTP_BUF_LEN(m_notify);
3282 	control->spec_flags = M_NOTIFICATION;
3283 	/* not that we need this */
3284 	control->tail_mbuf = m_notify;
3285 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3286 	    control,
3287 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3288 }
3289 
3290 /* This always must be called with the read-queue LOCKED in the INP */
3291 static void
3292 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3293     uint32_t val, int so_locked
3294 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3295     SCTP_UNUSED
3296 #endif
3297 )
3298 {
3299 	struct mbuf *m_notify;
3300 	struct sctp_pdapi_event *pdapi;
3301 	struct sctp_queued_to_read *control;
3302 	struct sockbuf *sb;
3303 
3304 	if ((stcb == NULL) ||
3305 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3306 		/* event not enabled */
3307 		return;
3308 	}
3309 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3310 		return;
3311 	}
3312 
3313 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3314 	if (m_notify == NULL)
3315 		/* no space left */
3316 		return;
3317 	SCTP_BUF_LEN(m_notify) = 0;
3318 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3319 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3320 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3321 	pdapi->pdapi_flags = 0;
3322 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3323 	pdapi->pdapi_indication = error;
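	/*
	 * 'val' packs the stream number in its upper 16 bits and the
	 * partial delivery sequence number in its lower 16 bits.
	 */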
3324 	pdapi->pdapi_stream = (val >> 16);
3325 	pdapi->pdapi_seq = (val & 0x0000ffff);
3326 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3327 
3328 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3329 	SCTP_BUF_NEXT(m_notify) = NULL;
3330 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3331 	    0, 0, stcb->asoc.context, 0, 0, 0,
3332 	    m_notify);
3333 	if (control == NULL) {
3334 		/* no memory */
3335 		sctp_m_freem(m_notify);
3336 		return;
3337 	}
3338 	control->length = SCTP_BUF_LEN(m_notify);
3339 	control->spec_flags = M_NOTIFICATION;
3340 	/* not that we need this */
3341 	control->tail_mbuf = m_notify;
3342 	sb = &stcb->sctp_socket->so_rcv;
3343 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3344 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3345 	}
3346 	sctp_sballoc(stcb, sb, m_notify);
3347 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3348 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3349 	}
3350 	control->end_added = 1;
3351 	if (stcb->asoc.control_pdapi)
3352 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3353 	else {
3354 		/* we really should not see this case */
3355 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3356 	}
3357 	if (stcb->sctp_ep && stcb->sctp_socket) {
3358 		/* This should always be the case */
3359 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3360 		struct socket *so;
3361 
3362 		so = SCTP_INP_SO(stcb->sctp_ep);
3363 		if (!so_locked) {
3364 			atomic_add_int(&stcb->asoc.refcnt, 1);
3365 			SCTP_TCB_UNLOCK(stcb);
3366 			SCTP_SOCKET_LOCK(so, 1);
3367 			SCTP_TCB_LOCK(stcb);
3368 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3369 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3370 				SCTP_SOCKET_UNLOCK(so, 1);
3371 				return;
3372 			}
3373 		}
3374 #endif
3375 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3376 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3377 		if (!so_locked) {
3378 			SCTP_SOCKET_UNLOCK(so, 1);
3379 		}
3380 #endif
3381 	}
3382 }
3383 
3384 static void
3385 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3386 {
3387 	struct mbuf *m_notify;
3388 	struct sctp_shutdown_event *sse;
3389 	struct sctp_queued_to_read *control;
3390 
3391 	/*
3392 	 * For TCP model AND UDP connected sockets we will send an error up
3393 	 * when a SHUTDOWN completes.
3394 	 */
3395 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3396 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3397 		/* mark socket closed for read/write and wakeup! */
3398 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3399 		struct socket *so;
3400 
3401 		so = SCTP_INP_SO(stcb->sctp_ep);
3402 		atomic_add_int(&stcb->asoc.refcnt, 1);
3403 		SCTP_TCB_UNLOCK(stcb);
3404 		SCTP_SOCKET_LOCK(so, 1);
3405 		SCTP_TCB_LOCK(stcb);
3406 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3407 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3408 			SCTP_SOCKET_UNLOCK(so, 1);
3409 			return;
3410 		}
3411 #endif
3412 		socantsendmore(stcb->sctp_socket);
3413 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3414 		SCTP_SOCKET_UNLOCK(so, 1);
3415 #endif
3416 	}
3417 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3418 		/* event not enabled */
3419 		return;
3420 	}
3421 
3422 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3423 	if (m_notify == NULL)
3424 		/* no space left */
3425 		return;
3426 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3427 	memset(sse, 0, sizeof(struct sctp_shutdown_event));
3428 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3429 	sse->sse_flags = 0;
3430 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3431 	sse->sse_assoc_id = sctp_get_associd(stcb);
3432 
3433 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3434 	SCTP_BUF_NEXT(m_notify) = NULL;
3435 
3436 	/* append to socket */
3437 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3438 	    0, 0, stcb->asoc.context, 0, 0, 0,
3439 	    m_notify);
3440 	if (control == NULL) {
3441 		/* no memory */
3442 		sctp_m_freem(m_notify);
3443 		return;
3444 	}
3445 	control->length = SCTP_BUF_LEN(m_notify);
3446 	control->spec_flags = M_NOTIFICATION;
3447 	/* not that we need this */
3448 	control->tail_mbuf = m_notify;
3449 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3450 	    control,
3451 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3452 }
3453 
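/*
 * Deliver an SCTP_SENDER_DRY_EVENT notification on the socket read queue,
 * provided SCTP_PCB_FLAGS_DRYEVNT is enabled.
 */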
3454 static void
3455 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3456     int so_locked
3457 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3458     SCTP_UNUSED
3459 #endif
3460 )
3461 {
3462 	struct mbuf *m_notify;
3463 	struct sctp_sender_dry_event *event;
3464 	struct sctp_queued_to_read *control;
3465 
3466 	if ((stcb == NULL) ||
3467 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3468 		/* event not enabled */
3469 		return;
3470 	}
3471 
3472 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3473 	if (m_notify == NULL) {
3474 		/* no space left */
3475 		return;
3476 	}
3477 	SCTP_BUF_LEN(m_notify) = 0;
3478 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3479 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3480 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3481 	event->sender_dry_flags = 0;
3482 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3483 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3484 
3485 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3486 	SCTP_BUF_NEXT(m_notify) = NULL;
3487 
3488 	/* append to socket */
3489 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3490 	    0, 0, stcb->asoc.context, 0, 0, 0,
3491 	    m_notify);
3492 	if (control == NULL) {
3493 		/* no memory */
3494 		sctp_m_freem(m_notify);
3495 		return;
3496 	}
3497 	control->length = SCTP_BUF_LEN(m_notify);
3498 	control->spec_flags = M_NOTIFICATION;
3499 	/* not that we need this */
3500 	control->tail_mbuf = m_notify;
3501 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3502 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3503 }
3504 
3505 
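/*
 * Deliver an SCTP_STREAM_CHANGE_EVENT notification carrying the new number
 * of incoming and outgoing streams.  If the peer made the request, the
 * local user is not told.
 */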
3506 void
3507 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3508 {
3509 	struct mbuf *m_notify;
3510 	struct sctp_queued_to_read *control;
3511 	struct sctp_stream_change_event *stradd;
3512 
3513 	if ((stcb == NULL) ||
3514 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3515 		/* event not enabled */
3516 		return;
3517 	}
3518 	if ((stcb->asoc.peer_req_out) && flag) {
3519 		/* Peer made the request, don't tell the local user */
3520 		stcb->asoc.peer_req_out = 0;
3521 		return;
3522 	}
3523 	stcb->asoc.peer_req_out = 0;
3524 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3525 	if (m_notify == NULL)
3526 		/* no space left */
3527 		return;
3528 	SCTP_BUF_LEN(m_notify) = 0;
3529 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3530 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3531 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3532 	stradd->strchange_flags = flag;
3533 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3534 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3535 	stradd->strchange_instrms = numberin;
3536 	stradd->strchange_outstrms = numberout;
3537 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3538 	SCTP_BUF_NEXT(m_notify) = NULL;
3539 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3540 		/* no space */
3541 		sctp_m_freem(m_notify);
3542 		return;
3543 	}
3544 	/* append to socket */
3545 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3546 	    0, 0, stcb->asoc.context, 0, 0, 0,
3547 	    m_notify);
3548 	if (control == NULL) {
3549 		/* no memory */
3550 		sctp_m_freem(m_notify);
3551 		return;
3552 	}
3553 	control->length = SCTP_BUF_LEN(m_notify);
3554 	control->spec_flags = M_NOTIFICATION;
3555 	/* not that we need this */
3556 	control->tail_mbuf = m_notify;
3557 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3558 	    control,
3559 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3560 }
3561 
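/*
 * Deliver an SCTP_ASSOC_RESET_EVENT notification carrying the local and
 * remote TSNs in effect after an association reset.
 */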
3562 void
3563 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3564 {
3565 	struct mbuf *m_notify;
3566 	struct sctp_queued_to_read *control;
3567 	struct sctp_assoc_reset_event *strasoc;
3568 
3569 	if ((stcb == NULL) ||
3570 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3571 		/* event not enabled */
3572 		return;
3573 	}
3574 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3575 	if (m_notify == NULL)
3576 		/* no space left */
3577 		return;
3578 	SCTP_BUF_LEN(m_notify) = 0;
3579 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3580 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3581 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3582 	strasoc->assocreset_flags = flag;
3583 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3584 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3585 	strasoc->assocreset_local_tsn = sending_tsn;
3586 	strasoc->assocreset_remote_tsn = recv_tsn;
3587 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3588 	SCTP_BUF_NEXT(m_notify) = NULL;
3589 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3590 		/* no space */
3591 		sctp_m_freem(m_notify);
3592 		return;
3593 	}
3594 	/* append to socket */
3595 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3596 	    0, 0, stcb->asoc.context, 0, 0, 0,
3597 	    m_notify);
3598 	if (control == NULL) {
3599 		/* no memory */
3600 		sctp_m_freem(m_notify);
3601 		return;
3602 	}
3603 	control->length = SCTP_BUF_LEN(m_notify);
3604 	control->spec_flags = M_NOTIFICATION;
3605 	/* not that we need this */
3606 	control->tail_mbuf = m_notify;
3607 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3608 	    control,
3609 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3610 }
3611 
3612 
3613 
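/*
 * Deliver an SCTP_STREAM_RESET_EVENT notification listing the affected
 * stream identifiers (if any) together with the result flags.
 */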
3614 static void
3615 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3616     int number_entries, uint16_t *list, int flag)
3617 {
3618 	struct mbuf *m_notify;
3619 	struct sctp_queued_to_read *control;
3620 	struct sctp_stream_reset_event *strreset;
3621 	int len;
3622 
3623 	if ((stcb == NULL) ||
3624 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3625 		/* event not enabled */
3626 		return;
3627 	}
3628 
3629 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3630 	if (m_notify == NULL)
3631 		/* no space left */
3632 		return;
3633 	SCTP_BUF_LEN(m_notify) = 0;
3634 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3635 	if (len > M_TRAILINGSPACE(m_notify)) {
3636 		/* never enough room */
3637 		sctp_m_freem(m_notify);
3638 		return;
3639 	}
3640 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3641 	memset(strreset, 0, len);
3642 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3643 	strreset->strreset_flags = flag;
3644 	strreset->strreset_length = len;
3645 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3646 	if (number_entries) {
3647 		int i;
3648 
3649 		for (i = 0; i < number_entries; i++) {
3650 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3651 		}
3652 	}
3653 	SCTP_BUF_LEN(m_notify) = len;
3654 	SCTP_BUF_NEXT(m_notify) = NULL;
3655 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3656 		/* no space */
3657 		sctp_m_freem(m_notify);
3658 		return;
3659 	}
3660 	/* append to socket */
3661 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3662 	    0, 0, stcb->asoc.context, 0, 0, 0,
3663 	    m_notify);
3664 	if (control == NULL) {
3665 		/* no memory */
3666 		sctp_m_freem(m_notify);
3667 		return;
3668 	}
3669 	control->length = SCTP_BUF_LEN(m_notify);
3670 	control->spec_flags = M_NOTIFICATION;
3671 	/* not that we need this */
3672 	control->tail_mbuf = m_notify;
3673 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3674 	    control,
3675 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3676 }
3677 
3678 
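/*
 * Deliver an SCTP_REMOTE_ERROR notification.  Up to SCTP_CHUNK_BUFFER_SIZE
 * bytes of the received ERROR chunk are copied into the notification.
 */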
3679 static void
3680 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3681 {
3682 	struct mbuf *m_notify;
3683 	struct sctp_remote_error *sre;
3684 	struct sctp_queued_to_read *control;
3685 	unsigned int notif_len;
3686 	uint16_t chunk_len;
3687 
3688 	if ((stcb == NULL) ||
3689 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3690 		return;
3691 	}
3692 	if (chunk != NULL) {
3693 		chunk_len = ntohs(chunk->ch.chunk_length);
3694 		/*
3695 		 * Only SCTP_CHUNK_BUFFER_SIZE bytes are guaranteed to be
3696 		 * contiguous.
3697 		 */
3698 		if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) {
3699 			chunk_len = SCTP_CHUNK_BUFFER_SIZE;
3700 		}
3701 	} else {
3702 		chunk_len = 0;
3703 	}
3704 	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
3705 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3706 	if (m_notify == NULL) {
3707 		/* Retry with smaller value. */
3708 		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
3709 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3710 		if (m_notify == NULL) {
3711 			return;
3712 		}
3713 	}
3714 	SCTP_BUF_NEXT(m_notify) = NULL;
3715 	sre = mtod(m_notify, struct sctp_remote_error *);
3716 	memset(sre, 0, notif_len);
3717 	sre->sre_type = SCTP_REMOTE_ERROR;
3718 	sre->sre_flags = 0;
3719 	sre->sre_length = sizeof(struct sctp_remote_error);
3720 	sre->sre_error = error;
3721 	sre->sre_assoc_id = sctp_get_associd(stcb);
3722 	if (notif_len > sizeof(struct sctp_remote_error)) {
3723 		memcpy(sre->sre_data, chunk, chunk_len);
3724 		sre->sre_length += chunk_len;
3725 	}
3726 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3727 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3728 	    0, 0, stcb->asoc.context, 0, 0, 0,
3729 	    m_notify);
3730 	if (control != NULL) {
3731 		control->length = SCTP_BUF_LEN(m_notify);
3732 		control->spec_flags = M_NOTIFICATION;
3733 		/* not that we need this */
3734 		control->tail_mbuf = m_notify;
3735 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3736 		    control,
3737 		    &stcb->sctp_socket->so_rcv, 1,
3738 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3739 	} else {
3740 		sctp_m_freem(m_notify);
3741 	}
3742 }
3743 
3744 
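/*
 * Central notification dispatcher: map an internal notification code to the
 * corresponding user-visible event and deliver it.  Nothing is reported if
 * the socket is gone or can no longer receive, and interface events are
 * suppressed in the COOKIE_WAIT and COOKIE_ECHOED states.
 */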
3745 void
3746 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3747     uint32_t error, void *data, int so_locked
3748 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3749     SCTP_UNUSED
3750 #endif
3751 )
3752 {
3753 	if ((stcb == NULL) ||
3754 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3755 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3756 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3757 		/* If the socket is gone we are out of here */
3758 		return;
3759 	}
3760 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3761 		return;
3762 	}
3763 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
3764 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
3765 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3766 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3767 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3768 			/* Don't report these in front states */
3769 			return;
3770 		}
3771 	}
3772 	switch (notification) {
3773 	case SCTP_NOTIFY_ASSOC_UP:
3774 		if (stcb->asoc.assoc_up_sent == 0) {
3775 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3776 			stcb->asoc.assoc_up_sent = 1;
3777 		}
3778 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3779 			sctp_notify_adaptation_layer(stcb);
3780 		}
3781 		if (stcb->asoc.auth_supported == 0) {
3782 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3783 			    NULL, so_locked);
3784 		}
3785 		break;
3786 	case SCTP_NOTIFY_ASSOC_DOWN:
3787 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3788 		break;
3789 	case SCTP_NOTIFY_INTERFACE_DOWN:
3790 		{
3791 			struct sctp_nets *net;
3792 
3793 			net = (struct sctp_nets *)data;
3794 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3795 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3796 			break;
3797 		}
3798 	case SCTP_NOTIFY_INTERFACE_UP:
3799 		{
3800 			struct sctp_nets *net;
3801 
3802 			net = (struct sctp_nets *)data;
3803 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3804 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3805 			break;
3806 		}
3807 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3808 		{
3809 			struct sctp_nets *net;
3810 
3811 			net = (struct sctp_nets *)data;
3812 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3813 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3814 			break;
3815 		}
3816 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3817 		sctp_notify_send_failed2(stcb, error,
3818 		    (struct sctp_stream_queue_pending *)data, so_locked);
3819 		break;
3820 	case SCTP_NOTIFY_SENT_DG_FAIL:
3821 		sctp_notify_send_failed(stcb, 1, error,
3822 		    (struct sctp_tmit_chunk *)data, so_locked);
3823 		break;
3824 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3825 		sctp_notify_send_failed(stcb, 0, error,
3826 		    (struct sctp_tmit_chunk *)data, so_locked);
3827 		break;
3828 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3829 		{
3830 			uint32_t val;
3831 
3832 			val = *((uint32_t *)data);
3833 
3834 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3835 			break;
3836 		}
3837 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3838 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
3839 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
3840 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3841 		} else {
3842 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3843 		}
3844 		break;
3845 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3846 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
3847 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
3848 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3849 		} else {
3850 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3851 		}
3852 		break;
3853 	case SCTP_NOTIFY_ASSOC_RESTART:
3854 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3855 		if (stcb->asoc.auth_supported == 0) {
3856 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3857 			    NULL, so_locked);
3858 		}
3859 		break;
3860 	case SCTP_NOTIFY_STR_RESET_SEND:
3861 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
3862 		break;
3863 	case SCTP_NOTIFY_STR_RESET_RECV:
3864 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
3865 		break;
3866 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3867 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3868 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3869 		break;
3870 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3871 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3872 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3873 		break;
3874 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3875 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3876 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3877 		break;
3878 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3879 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3880 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3881 		break;
3882 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3883 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3884 		    error, so_locked);
3885 		break;
3886 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3887 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3888 		    error, so_locked);
3889 		break;
3890 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3891 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3892 		    error, so_locked);
3893 		break;
3894 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3895 		sctp_notify_shutdown_event(stcb);
3896 		break;
3897 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3898 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3899 		    (uint16_t)(uintptr_t)data,
3900 		    so_locked);
3901 		break;
3902 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3903 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3904 		    (uint16_t)(uintptr_t)data,
3905 		    so_locked);
3906 		break;
3907 	case SCTP_NOTIFY_NO_PEER_AUTH:
3908 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3909 		    (uint16_t)(uintptr_t)data,
3910 		    so_locked);
3911 		break;
3912 	case SCTP_NOTIFY_SENDER_DRY:
3913 		sctp_notify_sender_dry_event(stcb, so_locked);
3914 		break;
3915 	case SCTP_NOTIFY_REMOTE_ERROR:
3916 		sctp_notify_remote_error(stcb, error, data);
3917 		break;
3918 	default:
3919 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3920 		    __func__, notification, notification);
3921 		break;
3922 	}			/* end switch */
3923 }
3924 
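/*
 * Report all queued outbound data as failed: walk the sent queue, the send
 * queue and every stream output queue, notify the ULP for each chunk and
 * free the associated data.  The send lock is taken unless the caller
 * already holds it (holds_lock).
 */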
3925 void
3926 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
3927 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3928     SCTP_UNUSED
3929 #endif
3930 )
3931 {
3932 	struct sctp_association *asoc;
3933 	struct sctp_stream_out *outs;
3934 	struct sctp_tmit_chunk *chk, *nchk;
3935 	struct sctp_stream_queue_pending *sp, *nsp;
3936 	int i;
3937 
3938 	if (stcb == NULL) {
3939 		return;
3940 	}
3941 	asoc = &stcb->asoc;
3942 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3943 		/* already being freed */
3944 		return;
3945 	}
3946 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3947 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3948 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
3949 		return;
3950 	}
3951 	/* now go through all the gunk, freeing chunks */
3952 	if (holds_lock == 0) {
3953 		SCTP_TCB_SEND_LOCK(stcb);
3954 	}
3955 	/* sent queue SHOULD be empty */
3956 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
3957 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3958 		asoc->sent_queue_cnt--;
3959 		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
3960 			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
3961 				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
3962 #ifdef INVARIANTS
3963 			} else {
3964 				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
3965 #endif
3966 			}
3967 		}
3968 		if (chk->data != NULL) {
3969 			sctp_free_bufspace(stcb, asoc, chk, 1);
3970 			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
3971 			    error, chk, so_locked);
3972 			if (chk->data) {
3973 				sctp_m_freem(chk->data);
3974 				chk->data = NULL;
3975 			}
3976 		}
3977 		sctp_free_a_chunk(stcb, chk, so_locked);
3978 		/* sa_ignore FREED_MEMORY */
3979 	}
3980 	/* pending send queue SHOULD be empty */
3981 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
3982 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3983 		asoc->send_queue_cnt--;
3984 		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
3985 			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
3986 #ifdef INVARIANTS
3987 		} else {
3988 			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
3989 #endif
3990 		}
3991 		if (chk->data != NULL) {
3992 			sctp_free_bufspace(stcb, asoc, chk, 1);
3993 			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
3994 			    error, chk, so_locked);
3995 			if (chk->data) {
3996 				sctp_m_freem(chk->data);
3997 				chk->data = NULL;
3998 			}
3999 		}
4000 		sctp_free_a_chunk(stcb, chk, so_locked);
4001 		/* sa_ignore FREED_MEMORY */
4002 	}
4003 	for (i = 0; i < asoc->streamoutcnt; i++) {
4004 		/* For each stream */
4005 		outs = &asoc->strmout[i];
4006 		/* clean up any sends there */
4007 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
4008 			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
4009 			TAILQ_REMOVE(&outs->outqueue, sp, next);
4010 			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1);
4011 			sctp_free_spbufspace(stcb, asoc, sp);
4012 			if (sp->data) {
4013 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
4014 				    error, (void *)sp, so_locked);
4015 				if (sp->data) {
4016 					sctp_m_freem(sp->data);
4017 					sp->data = NULL;
4018 					sp->tail_mbuf = NULL;
4019 					sp->length = 0;
4020 				}
4021 			}
4022 			if (sp->net) {
4023 				sctp_free_remote_addr(sp->net);
4024 				sp->net = NULL;
4025 			}
4026 			/* Free the chunk */
4027 			sctp_free_a_strmoq(stcb, sp, so_locked);
4028 			/* sa_ignore FREED_MEMORY */
4029 		}
4030 	}
4031 
4032 	if (holds_lock == 0) {
4033 		SCTP_TCB_SEND_UNLOCK(stcb);
4034 	}
4035 }
4036 
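/*
 * Notify the ULP that the association was aborted: report all outbound
 * data as failed and raise a remote or local abort notification, depending
 * on from_peer.
 */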
4037 void
4038 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
4039     struct sctp_abort_chunk *abort, int so_locked
4040 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4041     SCTP_UNUSED
4042 #endif
4043 )
4044 {
4045 	if (stcb == NULL) {
4046 		return;
4047 	}
4048 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
4049 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4050 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
4051 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
4052 	}
4053 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4054 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4055 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
4056 		return;
4057 	}
4058 	/* Tell them we lost the asoc */
4059 	sctp_report_all_outbound(stcb, error, 0, so_locked);
4060 	if (from_peer) {
4061 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
4062 	} else {
4063 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
4064 	}
4065 }
4066 
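/*
 * Send an ABORT in response to the given packet and, if a TCB exists,
 * notify the ULP and free the association.
 */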
4067 void
4068 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4069     struct mbuf *m, int iphlen,
4070     struct sockaddr *src, struct sockaddr *dst,
4071     struct sctphdr *sh, struct mbuf *op_err,
4072     uint8_t mflowtype, uint32_t mflowid,
4073     uint32_t vrf_id, uint16_t port)
4074 {
4075 	uint32_t vtag;
4076 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4077 	struct socket *so;
4078 #endif
4079 
4080 	vtag = 0;
4081 	if (stcb != NULL) {
4082 		vtag = stcb->asoc.peer_vtag;
4083 		vrf_id = stcb->asoc.vrf_id;
4084 	}
4085 	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
4086 	    mflowtype, mflowid, inp->fibnum,
4087 	    vrf_id, port);
4088 	if (stcb != NULL) {
4089 		/* We have a TCB to abort, send notification too */
4090 		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
4091 		SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
4092 		/* Ok, now lets free it */
4093 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4094 		so = SCTP_INP_SO(inp);
4095 		atomic_add_int(&stcb->asoc.refcnt, 1);
4096 		SCTP_TCB_UNLOCK(stcb);
4097 		SCTP_SOCKET_LOCK(so, 1);
4098 		SCTP_TCB_LOCK(stcb);
4099 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4100 #endif
4101 		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4102 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4103 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4104 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4105 		}
4106 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4107 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
4108 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4109 		SCTP_SOCKET_UNLOCK(so, 1);
4110 #endif
4111 	}
4112 }
4113 #ifdef SCTP_ASOCLOG_OF_TSNS
4114 void
4115 sctp_print_out_track_log(struct sctp_tcb *stcb)
4116 {
4117 #ifdef NOSIY_PRINTS
4118 	int i;
4119 
4120 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
4121 	SCTP_PRINTF("IN bound TSN log-aaa\n");
4122 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
4123 		SCTP_PRINTF("None rcvd\n");
4124 		goto none_in;
4125 	}
4126 	if (stcb->asoc.tsn_in_wrapped) {
4127 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
4128 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4129 			    stcb->asoc.in_tsnlog[i].tsn,
4130 			    stcb->asoc.in_tsnlog[i].strm,
4131 			    stcb->asoc.in_tsnlog[i].seq,
4132 			    stcb->asoc.in_tsnlog[i].flgs,
4133 			    stcb->asoc.in_tsnlog[i].sz);
4134 		}
4135 	}
4136 	if (stcb->asoc.tsn_in_at) {
4137 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4138 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4139 			    stcb->asoc.in_tsnlog[i].tsn,
4140 			    stcb->asoc.in_tsnlog[i].strm,
4141 			    stcb->asoc.in_tsnlog[i].seq,
4142 			    stcb->asoc.in_tsnlog[i].flgs,
4143 			    stcb->asoc.in_tsnlog[i].sz);
4144 		}
4145 	}
4146 none_in:
4147 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
4148 	if ((stcb->asoc.tsn_out_at == 0) &&
4149 	    (stcb->asoc.tsn_out_wrapped == 0)) {
4150 		SCTP_PRINTF("None sent\n");
4151 	}
4152 	if (stcb->asoc.tsn_out_wrapped) {
4153 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4154 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4155 			    stcb->asoc.out_tsnlog[i].tsn,
4156 			    stcb->asoc.out_tsnlog[i].strm,
4157 			    stcb->asoc.out_tsnlog[i].seq,
4158 			    stcb->asoc.out_tsnlog[i].flgs,
4159 			    stcb->asoc.out_tsnlog[i].sz);
4160 		}
4161 	}
4162 	if (stcb->asoc.tsn_out_at) {
4163 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4164 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4165 			    stcb->asoc.out_tsnlog[i].tsn,
4166 			    stcb->asoc.out_tsnlog[i].strm,
4167 			    stcb->asoc.out_tsnlog[i].seq,
4168 			    stcb->asoc.out_tsnlog[i].flgs,
4169 			    stcb->asoc.out_tsnlog[i].sz);
4170 		}
4171 	}
4172 #endif
4173 }
4174 #endif
4175 
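/*
 * Abort an existing association: send an ABORT to the peer, notify the ULP
 * unless the socket is already gone, and free the association.
 */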
4176 void
4177 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4178     struct mbuf *op_err,
4179     int so_locked
4180 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4181     SCTP_UNUSED
4182 #endif
4183 )
4184 {
4185 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4186 	struct socket *so;
4187 #endif
4188 
4189 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4190 	so = SCTP_INP_SO(inp);
4191 #endif
4192 	if (stcb == NULL) {
4193 		/* Got to have a TCB */
4194 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4195 			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4196 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4197 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
4198 			}
4199 		}
4200 		return;
4201 	} else {
4202 		SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
4203 	}
4204 	/* notify the peer */
4205 	sctp_send_abort_tcb(stcb, op_err, so_locked);
4206 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4207 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4208 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4209 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4210 	}
4211 	/* notify the ulp */
4212 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4213 		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4214 	}
4215 	/* now free the asoc */
4216 #ifdef SCTP_ASOCLOG_OF_TSNS
4217 	sctp_print_out_track_log(stcb);
4218 #endif
4219 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4220 	if (!so_locked) {
4221 		atomic_add_int(&stcb->asoc.refcnt, 1);
4222 		SCTP_TCB_UNLOCK(stcb);
4223 		SCTP_SOCKET_LOCK(so, 1);
4224 		SCTP_TCB_LOCK(stcb);
4225 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4226 	}
4227 #endif
4228 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4229 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4230 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4231 	if (!so_locked) {
4232 		SCTP_SOCKET_UNLOCK(so, 1);
4233 	}
4234 #endif
4235 }
4236 
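/*
 * Handle an "out of the blue" packet: scan its chunks, answer a
 * SHUTDOWN-ACK with a SHUTDOWN-COMPLETE, stay silent on ABORT,
 * SHUTDOWN-COMPLETE and PACKET-DROPPED, and otherwise send an ABORT,
 * subject to the sctp_blackhole sysctl.
 */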
4237 void
4238 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4239     struct sockaddr *src, struct sockaddr *dst,
4240     struct sctphdr *sh, struct sctp_inpcb *inp,
4241     struct mbuf *cause,
4242     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4243     uint32_t vrf_id, uint16_t port)
4244 {
4245 	struct sctp_chunkhdr *ch, chunk_buf;
4246 	unsigned int chk_length;
4247 	int contains_init_chunk;
4248 
4249 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4250 	/* Generate a TO address for future reference */
4251 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4252 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4253 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4254 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4255 		}
4256 	}
4257 	contains_init_chunk = 0;
4258 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4259 	    sizeof(*ch), (uint8_t *)&chunk_buf);
4260 	while (ch != NULL) {
4261 		chk_length = ntohs(ch->chunk_length);
4262 		if (chk_length < sizeof(*ch)) {
4263 			/* break to abort land */
4264 			break;
4265 		}
4266 		switch (ch->chunk_type) {
4267 		case SCTP_INIT:
4268 			contains_init_chunk = 1;
4269 			break;
4270 		case SCTP_PACKET_DROPPED:
4271 			/* we don't respond to pkt-dropped */
4272 			return;
4273 		case SCTP_ABORT_ASSOCIATION:
4274 			/* we don't respond with an ABORT to an ABORT */
4275 			return;
4276 		case SCTP_SHUTDOWN_COMPLETE:
4277 			/*
4278 			 * we ignore it since we are not waiting for it and
4279 			 * peer is gone
4280 			 */
4281 			return;
4282 		case SCTP_SHUTDOWN_ACK:
4283 			sctp_send_shutdown_complete2(src, dst, sh,
4284 			    mflowtype, mflowid, fibnum,
4285 			    vrf_id, port);
4286 			return;
4287 		default:
4288 			break;
4289 		}
4290 		offset += SCTP_SIZE32(chk_length);
4291 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4292 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4293 	}
4294 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4295 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4296 	    (contains_init_chunk == 0))) {
4297 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4298 		    mflowtype, mflowid, fibnum,
4299 		    vrf_id, port);
4300 	}
4301 }
4302 
4303 /*
4304  * check the inbound datagram to make sure there is not an abort inside it;
4305  * if there is, return 1, else return 0.
4306  */
4307 int
4308 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
4309 {
4310 	struct sctp_chunkhdr *ch;
4311 	struct sctp_init_chunk *init_chk, chunk_buf;
4312 	int offset;
4313 	unsigned int chk_length;
4314 
4315 	offset = iphlen + sizeof(struct sctphdr);
4316 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4317 	    (uint8_t *)&chunk_buf);
4318 	while (ch != NULL) {
4319 		chk_length = ntohs(ch->chunk_length);
4320 		if (chk_length < sizeof(*ch)) {
4321 			/* packet is probably corrupt */
4322 			break;
4323 		}
4324 		/* we seem to be ok, is it an abort? */
4325 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4326 			/* yep, tell them */
4327 			return (1);
4328 		}
4329 		if (ch->chunk_type == SCTP_INITIATION) {
4330 			/* need to update the Vtag */
4331 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4332 			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
4333 			if (init_chk != NULL) {
4334 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4335 			}
4336 		}
4337 		/* Nope, move to the next chunk */
4338 		offset += SCTP_SIZE32(chk_length);
4339 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4340 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4341 	}
4342 	return (0);
4343 }
4344 
4345 /*
4346  * currently (2/02), ifa_addr embeds scope_ids and doesn't have sin6_scope_id
4347  * set (i.e. it's 0), so create this function to compare link-local scopes
4348  */
4349 #ifdef INET6
4350 uint32_t
4351 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4352 {
4353 	struct sockaddr_in6 a, b;
4354 
4355 	/* save copies */
4356 	a = *addr1;
4357 	b = *addr2;
4358 
4359 	if (a.sin6_scope_id == 0)
4360 		if (sa6_recoverscope(&a)) {
4361 			/* can't get scope, so can't match */
4362 			return (0);
4363 		}
4364 	if (b.sin6_scope_id == 0)
4365 		if (sa6_recoverscope(&b)) {
4366 			/* can't get scope, so can't match */
4367 			return (0);
4368 		}
4369 	if (a.sin6_scope_id != b.sin6_scope_id)
4370 		return (0);
4371 
4372 	return (1);
4373 }
4374 
4375 /*
4376  * returns a sockaddr_in6 with embedded scope recovered and removed
4377  */
4378 struct sockaddr_in6 *
4379 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4380 {
4381 	/* check and strip embedded scope junk */
4382 	if (addr->sin6_family == AF_INET6) {
4383 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4384 			if (addr->sin6_scope_id == 0) {
4385 				*store = *addr;
4386 				if (!sa6_recoverscope(store)) {
4387 					/* use the recovered scope */
4388 					addr = store;
4389 				}
4390 			} else {
4391 				/* else, return the original "to" addr */
4392 				in6_clearscope(&addr->sin6_addr);
4393 			}
4394 		}
4395 	}
4396 	return (addr);
4397 }
4398 #endif
4399 
4400 /*
4401  * Are the two addresses the same?  Currently a "scopeless" check; returns
4402  * 1 if same, 0 if not.
4403  */
4404 int
4405 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4406 {
4407 
4408 	/* must be valid */
4409 	if (sa1 == NULL || sa2 == NULL)
4410 		return (0);
4411 
4412 	/* must be the same family */
4413 	if (sa1->sa_family != sa2->sa_family)
4414 		return (0);
4415 
4416 	switch (sa1->sa_family) {
4417 #ifdef INET6
4418 	case AF_INET6:
4419 		{
4420 			/* IPv6 addresses */
4421 			struct sockaddr_in6 *sin6_1, *sin6_2;
4422 
4423 			sin6_1 = (struct sockaddr_in6 *)sa1;
4424 			sin6_2 = (struct sockaddr_in6 *)sa2;
4425 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4426 			    sin6_2));
4427 		}
4428 #endif
4429 #ifdef INET
4430 	case AF_INET:
4431 		{
4432 			/* IPv4 addresses */
4433 			struct sockaddr_in *sin_1, *sin_2;
4434 
4435 			sin_1 = (struct sockaddr_in *)sa1;
4436 			sin_2 = (struct sockaddr_in *)sa2;
4437 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4438 		}
4439 #endif
4440 	default:
4441 		/* we don't do these... */
4442 		return (0);
4443 	}
4444 }
4445 
4446 void
4447 sctp_print_address(struct sockaddr *sa)
4448 {
4449 #ifdef INET6
4450 	char ip6buf[INET6_ADDRSTRLEN];
4451 #endif
4452 
4453 	switch (sa->sa_family) {
4454 #ifdef INET6
4455 	case AF_INET6:
4456 		{
4457 			struct sockaddr_in6 *sin6;
4458 
4459 			sin6 = (struct sockaddr_in6 *)sa;
4460 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4461 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4462 			    ntohs(sin6->sin6_port),
4463 			    sin6->sin6_scope_id);
4464 			break;
4465 		}
4466 #endif
4467 #ifdef INET
4468 	case AF_INET:
4469 		{
4470 			struct sockaddr_in *sin;
4471 			unsigned char *p;
4472 
4473 			sin = (struct sockaddr_in *)sa;
4474 			p = (unsigned char *)&sin->sin_addr;
4475 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4476 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4477 			break;
4478 		}
4479 #endif
4480 	default:
4481 		SCTP_PRINTF("?\n");
4482 		break;
4483 	}
4484 }
4485 
4486 void
4487 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4488     struct sctp_inpcb *new_inp,
4489     struct sctp_tcb *stcb,
4490     int waitflags)
4491 {
4492 	/*
4493 	 * go through our old INP and pull off any control structures that
4494 	 * belong to stcb and move them to the new inp.
4495 	 */
4496 	struct socket *old_so, *new_so;
4497 	struct sctp_queued_to_read *control, *nctl;
4498 	struct sctp_readhead tmp_queue;
4499 	struct mbuf *m;
4500 	int error = 0;
4501 
4502 	old_so = old_inp->sctp_socket;
4503 	new_so = new_inp->sctp_socket;
4504 	TAILQ_INIT(&tmp_queue);
4505 	error = sblock(&old_so->so_rcv, waitflags);
4506 	if (error) {
4507 		/*
4508 		 * Gak, can't get sblock, we have a problem. data will be
4509 		 * left stranded.. and we don't dare look at it since the
4510 		 * other thread may be reading something. Oh well, it's a
4511 		 * screwed-up app that does a peeloff OR an accept while
4512 		 * reading from the main socket... actually it's only the
4513 		 * peeloff() case, since I think read will fail on a
4514 		 * listening socket..
4515 		 */
4516 		return;
4517 	}
4518 	/* lock the socket buffers */
4519 	SCTP_INP_READ_LOCK(old_inp);
4520 	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4521 		/* Pull off all for our target stcb */
4522 		if (control->stcb == stcb) {
4523 			/* remove it, we want it */
4524 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4525 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4526 			m = control->data;
4527 			while (m) {
4528 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4529 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4530 				}
4531 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4532 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4533 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4534 				}
4535 				m = SCTP_BUF_NEXT(m);
4536 			}
4537 		}
4538 	}
4539 	SCTP_INP_READ_UNLOCK(old_inp);
4540 	/* Remove the sb-lock on the old socket */
4541 
4542 	sbunlock(&old_so->so_rcv);
4543 	/* Now we move them over to the new socket buffer */
4544 	SCTP_INP_READ_LOCK(new_inp);
4545 	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4546 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4547 		m = control->data;
4548 		while (m) {
4549 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4550 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4551 			}
4552 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4553 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4554 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4555 			}
4556 			m = SCTP_BUF_NEXT(m);
4557 		}
4558 	}
4559 	SCTP_INP_READ_UNLOCK(new_inp);
4560 }
4561 
4562 void
4563 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
4564     struct sctp_tcb *stcb,
4565     int so_locked
4566 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4567     SCTP_UNUSED
4568 #endif
4569 )
4570 {
4571 	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
4572 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4573 		struct socket *so;
4574 
4575 		so = SCTP_INP_SO(inp);
4576 		if (!so_locked) {
4577 			if (stcb) {
4578 				atomic_add_int(&stcb->asoc.refcnt, 1);
4579 				SCTP_TCB_UNLOCK(stcb);
4580 			}
4581 			SCTP_SOCKET_LOCK(so, 1);
4582 			if (stcb) {
4583 				SCTP_TCB_LOCK(stcb);
4584 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4585 			}
4586 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4587 				SCTP_SOCKET_UNLOCK(so, 1);
4588 				return;
4589 			}
4590 		}
4591 #endif
4592 		sctp_sorwakeup(inp, inp->sctp_socket);
4593 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4594 		if (!so_locked) {
4595 			SCTP_SOCKET_UNLOCK(so, 1);
4596 		}
4597 #endif
4598 	}
4599 }
4600 
4601 void
4602 sctp_add_to_readq(struct sctp_inpcb *inp,
4603     struct sctp_tcb *stcb,
4604     struct sctp_queued_to_read *control,
4605     struct sockbuf *sb,
4606     int end,
4607     int inp_read_lock_held,
4608     int so_locked
4609 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4610     SCTP_UNUSED
4611 #endif
4612 )
4613 {
4614 	/*
4615 	 * Here we must place the control on the end of the socket read
4616 	 * queue AND increment sb_cc so that select will work properly on
4617 	 * read.
4618 	 */
4619 	struct mbuf *m, *prev = NULL;
4620 
4621 	if (inp == NULL) {
4622 		/* Gak, TSNH!! */
4623 #ifdef INVARIANTS
4624 		panic("Gak, inp NULL on add_to_readq");
4625 #endif
4626 		return;
4627 	}
4628 	if (inp_read_lock_held == 0)
4629 		SCTP_INP_READ_LOCK(inp);
4630 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4631 		if (!control->on_strm_q) {
4632 			sctp_free_remote_addr(control->whoFrom);
4633 			if (control->data) {
4634 				sctp_m_freem(control->data);
4635 				control->data = NULL;
4636 			}
4637 			sctp_free_a_readq(stcb, control);
4638 		}
4639 		if (inp_read_lock_held == 0)
4640 			SCTP_INP_READ_UNLOCK(inp);
4641 		return;
4642 	}
4643 	if (!(control->spec_flags & M_NOTIFICATION)) {
4644 		atomic_add_int(&inp->total_recvs, 1);
4645 		if (!control->do_not_ref_stcb) {
4646 			atomic_add_int(&stcb->total_recvs, 1);
4647 		}
4648 	}
4649 	m = control->data;
4650 	control->held_length = 0;
4651 	control->length = 0;
4652 	while (m) {
4653 		if (SCTP_BUF_LEN(m) == 0) {
4654 			/* Skip mbufs with NO length */
4655 			if (prev == NULL) {
4656 				/* First one */
4657 				control->data = sctp_m_free(m);
4658 				m = control->data;
4659 			} else {
4660 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4661 				m = SCTP_BUF_NEXT(prev);
4662 			}
4663 			if (m == NULL) {
4664 				control->tail_mbuf = prev;
4665 			}
4666 			continue;
4667 		}
4668 		prev = m;
4669 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4670 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4671 		}
4672 		sctp_sballoc(stcb, sb, m);
4673 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4674 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4675 		}
4676 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4677 		m = SCTP_BUF_NEXT(m);
4678 	}
4679 	if (prev != NULL) {
4680 		control->tail_mbuf = prev;
4681 	} else {
4682 		/* Everything got collapsed out?? */
4683 		if (!control->on_strm_q) {
4684 			sctp_free_remote_addr(control->whoFrom);
4685 			sctp_free_a_readq(stcb, control);
4686 		}
4687 		if (inp_read_lock_held == 0)
4688 			SCTP_INP_READ_UNLOCK(inp);
4689 		return;
4690 	}
4691 	if (end) {
4692 		control->end_added = 1;
4693 	}
4694 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4695 	control->on_read_q = 1;
4696 	if (inp_read_lock_held == 0)
4697 		SCTP_INP_READ_UNLOCK(inp);
4698 	if (inp && inp->sctp_socket) {
4699 		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
4700 	}
4701 }
4702 
4703 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4704  *************ALTERNATE ROUTING CODE
4705  */
4706 
4707 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4708  *************ALTERNATE ROUTING CODE
4709  */
4710 
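/*
 * Build an mbuf containing a generic error cause with the given cause code
 * and the string info as cause-specific data.  Returns NULL if the cause
 * cannot be built.
 */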
4711 struct mbuf *
4712 sctp_generate_cause(uint16_t code, char *info)
4713 {
4714 	struct mbuf *m;
4715 	struct sctp_gen_error_cause *cause;
4716 	size_t info_len;
4717 	uint16_t len;
4718 
4719 	if ((code == 0) || (info == NULL)) {
4720 		return (NULL);
4721 	}
4722 	info_len = strlen(info);
4723 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4724 		return (NULL);
4725 	}
4726 	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
4727 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4728 	if (m != NULL) {
4729 		SCTP_BUF_LEN(m) = len;
4730 		cause = mtod(m, struct sctp_gen_error_cause *);
4731 		cause->code = htons(code);
4732 		cause->length = htons(len);
4733 		memcpy(cause->info, info, info_len);
4734 	}
4735 	return (m);
4736 }
4737 
4738 struct mbuf *
4739 sctp_generate_no_user_data_cause(uint32_t tsn)
4740 {
4741 	struct mbuf *m;
4742 	struct sctp_error_no_user_data *no_user_data_cause;
4743 	uint16_t len;
4744 
4745 	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
4746 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4747 	if (m != NULL) {
4748 		SCTP_BUF_LEN(m) = len;
4749 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4750 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4751 		no_user_data_cause->cause.length = htons(len);
4752 		no_user_data_cause->tsn = htonl(tsn);
4753 	}
4754 	return (m);
4755 }
4756 
4757 #ifdef SCTP_MBCNT_LOGGING
4758 void
4759 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4760     struct sctp_tmit_chunk *tp1, int chk_cnt)
4761 {
4762 	if (tp1->data == NULL) {
4763 		return;
4764 	}
4765 	asoc->chunks_on_out_queue -= chk_cnt;
4766 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4767 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4768 		    asoc->total_output_queue_size,
4769 		    tp1->book_size,
4770 		    0,
4771 		    tp1->mbcnt);
4772 	}
4773 	if (asoc->total_output_queue_size >= tp1->book_size) {
4774 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4775 	} else {
4776 		asoc->total_output_queue_size = 0;
4777 	}
4778 
4779 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4780 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4781 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4782 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4783 		} else {
4784 			stcb->sctp_socket->so_snd.sb_cc = 0;
4785 
4786 		}
4787 	}
4788 }
4789 
4790 #endif
4791 
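/*
 * Abandon a PR-SCTP message: mark all of its fragments on the sent and
 * send queues as skipped for FORWARD-TSN purposes, notify the ULP, free
 * the data and discard whatever is still sitting on the stream out queue.
 * Returns the amount of (booked) data released.
 */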
4792 int
4793 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4794     uint8_t sent, int so_locked
4795 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4796     SCTP_UNUSED
4797 #endif
4798 )
4799 {
4800 	struct sctp_stream_out *strq;
4801 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4802 	struct sctp_stream_queue_pending *sp;
4803 	uint32_t mid;
4804 	uint16_t sid;
4805 	uint8_t foundeom = 0;
4806 	int ret_sz = 0;
4807 	int notdone;
4808 	int do_wakeup_routine = 0;
4809 
4810 	sid = tp1->rec.data.sid;
4811 	mid = tp1->rec.data.mid;
4812 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4813 		stcb->asoc.abandoned_sent[0]++;
4814 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4815 		stcb->asoc.strmout[sid].abandoned_sent[0]++;
4816 #if defined(SCTP_DETAILED_STR_STATS)
4817 		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4818 #endif
4819 	} else {
4820 		stcb->asoc.abandoned_unsent[0]++;
4821 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4822 		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
4823 #if defined(SCTP_DETAILED_STR_STATS)
4824 		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4825 #endif
4826 	}
4827 	do {
4828 		ret_sz += tp1->book_size;
4829 		if (tp1->data != NULL) {
4830 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4831 				sctp_flight_size_decrease(tp1);
4832 				sctp_total_flight_decrease(stcb, tp1);
4833 			}
4834 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4835 			stcb->asoc.peers_rwnd += tp1->send_size;
4836 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4837 			if (sent) {
4838 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4839 			} else {
4840 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4841 			}
4842 			if (tp1->data) {
4843 				sctp_m_freem(tp1->data);
4844 				tp1->data = NULL;
4845 			}
4846 			do_wakeup_routine = 1;
4847 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4848 				stcb->asoc.sent_queue_cnt_removeable--;
4849 			}
4850 		}
4851 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4852 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4853 		    SCTP_DATA_NOT_FRAG) {
4854 			/* not frag'ed, we are done */
4855 			notdone = 0;
4856 			foundeom = 1;
4857 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4858 			/* end of frag, we are done */
4859 			notdone = 0;
4860 			foundeom = 1;
4861 		} else {
4862 			/*
4863 			 * It's a begin or middle piece, we must mark all of
4864 			 * it
4865 			 */
4866 			notdone = 1;
4867 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4868 		}
4869 	} while (tp1 && notdone);
4870 	if (foundeom == 0) {
4871 		/*
4872 		 * The multi-part message was scattered across the send and
4873 		 * sent queue.
4874 		 */
4875 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4876 			if ((tp1->rec.data.sid != sid) ||
4877 			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
4878 				break;
4879 			}
4880 			/*
4881 			 * save to chk in case we have some on stream out
4882 			 * queue. If so and we have an un-transmitted one we
4883 			 * don't have to fudge the TSN.
4884 			 */
4885 			chk = tp1;
4886 			ret_sz += tp1->book_size;
4887 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4888 			if (sent) {
4889 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4890 			} else {
4891 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4892 			}
4893 			if (tp1->data) {
4894 				sctp_m_freem(tp1->data);
4895 				tp1->data = NULL;
4896 			}
4897 			/* No flight involved here, book the size to 0 */
4898 			tp1->book_size = 0;
4899 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4900 				foundeom = 1;
4901 			}
4902 			do_wakeup_routine = 1;
4903 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4904 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4905 			/*
4906 			 * on to the sent queue so we can wait for it to be
4907 			 * passed by.
4908 			 */
4909 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4910 			    sctp_next);
4911 			stcb->asoc.send_queue_cnt--;
4912 			stcb->asoc.sent_queue_cnt++;
4913 		}
4914 	}
4915 	if (foundeom == 0) {
4916 		/*
4917 		 * Still no eom found. That means there is stuff left on the
4918 		 * stream out queue.. yuck.
4919 		 */
4920 		SCTP_TCB_SEND_LOCK(stcb);
4921 		strq = &stcb->asoc.strmout[sid];
4922 		sp = TAILQ_FIRST(&strq->outqueue);
4923 		if (sp != NULL) {
4924 			sp->discard_rest = 1;
4925 			/*
4926 			 * We may need to put a chunk on the queue that
4927 			 * holds the TSN that would have been sent with the
4928 			 * LAST bit.
4929 			 */
4930 			if (chk == NULL) {
4931 				/* Yep, we have to */
4932 				sctp_alloc_a_chunk(stcb, chk);
4933 				if (chk == NULL) {
4934 					/*
4935 					 * we are hosed. All we can do is
4936 					 * nothing.. which will cause an
4937 					 * abort if the peer is paying
4938 					 * attention.
4939 					 */
4940 					goto oh_well;
4941 				}
4942 				memset(chk, 0, sizeof(*chk));
4943 				chk->rec.data.rcv_flags = 0;
4944 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4945 				chk->asoc = &stcb->asoc;
4946 				if (stcb->asoc.idata_supported == 0) {
4947 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4948 						chk->rec.data.mid = 0;
4949 					} else {
4950 						chk->rec.data.mid = strq->next_mid_ordered;
4951 					}
4952 				} else {
4953 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4954 						chk->rec.data.mid = strq->next_mid_unordered;
4955 					} else {
4956 						chk->rec.data.mid = strq->next_mid_ordered;
4957 					}
4958 				}
4959 				chk->rec.data.sid = sp->sid;
4960 				chk->rec.data.ppid = sp->ppid;
4961 				chk->rec.data.context = sp->context;
4962 				chk->flags = sp->act_flags;
4963 				chk->whoTo = NULL;
4964 				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4965 				strq->chunks_on_queues++;
4966 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4967 				stcb->asoc.sent_queue_cnt++;
4968 				stcb->asoc.pr_sctp_cnt++;
4969 			}
4970 			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4971 			if (sp->sinfo_flags & SCTP_UNORDERED) {
4972 				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
4973 			}
4974 			if (stcb->asoc.idata_supported == 0) {
4975 				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
4976 					strq->next_mid_ordered++;
4977 				}
4978 			} else {
4979 				if (sp->sinfo_flags & SCTP_UNORDERED) {
4980 					strq->next_mid_unordered++;
4981 				} else {
4982 					strq->next_mid_ordered++;
4983 				}
4984 			}
4985 	oh_well:
4986 			if (sp->data) {
4987 				/*
4988 				 * Pull any data to free up the SB and allow
4989 				 * sender to "add more" while we will throw
4990 				 * away :-)
4991 				 */
4992 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4993 				ret_sz += sp->length;
4994 				do_wakeup_routine = 1;
4995 				sp->some_taken = 1;
4996 				sctp_m_freem(sp->data);
4997 				sp->data = NULL;
4998 				sp->tail_mbuf = NULL;
4999 				sp->length = 0;
5000 			}
5001 		}
5002 		SCTP_TCB_SEND_UNLOCK(stcb);
5003 	}
5004 	if (do_wakeup_routine) {
5005 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5006 		struct socket *so;
5007 
5008 		so = SCTP_INP_SO(stcb->sctp_ep);
5009 		if (!so_locked) {
5010 			atomic_add_int(&stcb->asoc.refcnt, 1);
5011 			SCTP_TCB_UNLOCK(stcb);
5012 			SCTP_SOCKET_LOCK(so, 1);
5013 			SCTP_TCB_LOCK(stcb);
5014 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
5015 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
5016 				/* assoc was freed while we were unlocked */
5017 				SCTP_SOCKET_UNLOCK(so, 1);
5018 				return (ret_sz);
5019 			}
5020 		}
5021 #endif
5022 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
5023 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5024 		if (!so_locked) {
5025 			SCTP_SOCKET_UNLOCK(so, 1);
5026 		}
5027 #endif
5028 	}
5029 	return (ret_sz);
5030 }
5031 
5032 /*
5033  * Checks to see if the given address, addr, is one that is currently known
5034  * by the kernel.  Note: can't distinguish the same address on multiple
5035  * interfaces and doesn't handle multiple addresses with different
5036  * zone/scope ids.  Note: ifa_ifwithaddr() compares the entire sockaddr struct.
5037  */
5038 struct sctp_ifa *
5039 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
5040     int holds_lock)
5041 {
5042 	struct sctp_laddr *laddr;
5043 
5044 	if (holds_lock == 0) {
5045 		SCTP_INP_RLOCK(inp);
5046 	}
5047 
5048 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
5049 		if (laddr->ifa == NULL)
5050 			continue;
5051 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
5052 			continue;
5053 #ifdef INET
5054 		if (addr->sa_family == AF_INET) {
5055 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5056 			    laddr->ifa->address.sin.sin_addr.s_addr) {
5057 				/* found him. */
5058 				if (holds_lock == 0) {
5059 					SCTP_INP_RUNLOCK(inp);
5060 				}
5061 				return (laddr->ifa);
5062 				break;
5063 			}
5064 		}
5065 #endif
5066 #ifdef INET6
5067 		if (addr->sa_family == AF_INET6) {
5068 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5069 			    &laddr->ifa->address.sin6)) {
5070 				/* found him. */
5071 				if (holds_lock == 0) {
5072 					SCTP_INP_RUNLOCK(inp);
5073 				}
5074 				return (laddr->ifa);
5075 				break;
5076 			}
5077 		}
5078 #endif
5079 	}
5080 	if (holds_lock == 0) {
5081 		SCTP_INP_RUNLOCK(inp);
5082 	}
5083 	return (NULL);
5084 }
5085 
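/*
 * Compute the hash value for the given address, used to index the VRF
 * address hash table.
 */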
5086 uint32_t
5087 sctp_get_ifa_hash_val(struct sockaddr *addr)
5088 {
5089 	switch (addr->sa_family) {
5090 #ifdef INET
5091 	case AF_INET:
5092 		{
5093 			struct sockaddr_in *sin;
5094 
5095 			sin = (struct sockaddr_in *)addr;
5096 			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
5097 		}
5098 #endif
5099 #ifdef INET6
5100 	case AF_INET6:
5101 		{
5102 			struct sockaddr_in6 *sin6;
5103 			uint32_t hash_of_addr;
5104 
5105 			sin6 = (struct sockaddr_in6 *)addr;
5106 			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
5107 			    sin6->sin6_addr.s6_addr32[1] +
5108 			    sin6->sin6_addr.s6_addr32[2] +
5109 			    sin6->sin6_addr.s6_addr32[3]);
5110 			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
5111 			return (hash_of_addr);
5112 		}
5113 #endif
5114 	default:
5115 		break;
5116 	}
5117 	return (0);
5118 }
5119 
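/*
 * Look up the given address in the VRF address hash table and return the
 * matching sctp_ifa, or NULL if the address is not known.
 */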
5120 struct sctp_ifa *
5121 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5122 {
5123 	struct sctp_ifa *sctp_ifap;
5124 	struct sctp_vrf *vrf;
5125 	struct sctp_ifalist *hash_head;
5126 	uint32_t hash_of_addr;
5127 
5128 	if (holds_lock == 0)
5129 		SCTP_IPI_ADDR_RLOCK();
5130 
5131 	vrf = sctp_find_vrf(vrf_id);
5132 	if (vrf == NULL) {
5133 		if (holds_lock == 0)
5134 			SCTP_IPI_ADDR_RUNLOCK();
5135 		return (NULL);
5136 	}
5137 
5138 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5139 
5140 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5141 	if (hash_head == NULL) {
5142 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5143 		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5144 		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5145 		sctp_print_address(addr);
5146 		SCTP_PRINTF("No such bucket for address\n");
5147 		if (holds_lock == 0)
5148 			SCTP_IPI_ADDR_RUNLOCK();
5149 
5150 		return (NULL);
5151 	}
5152 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5153 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5154 			continue;
5155 #ifdef INET
5156 		if (addr->sa_family == AF_INET) {
5157 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5158 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5159 				/* found him. */
5160 				if (holds_lock == 0)
5161 					SCTP_IPI_ADDR_RUNLOCK();
5162 				return (sctp_ifap);
5163 				break;
5164 			}
5165 		}
5166 #endif
5167 #ifdef INET6
5168 		if (addr->sa_family == AF_INET6) {
5169 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5170 			    &sctp_ifap->address.sin6)) {
5171 				/* found him. */
5172 				if (holds_lock == 0)
5173 					SCTP_IPI_ADDR_RUNLOCK();
5174 				return (sctp_ifap);
5175 				break;
5176 			}
5177 		}
5178 #endif
5179 	}
5180 	if (holds_lock == 0)
5181 		SCTP_IPI_ADDR_RUNLOCK();
5182 	return (NULL);
5183 }
5184 
5185 static void
5186 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
5187     uint32_t rwnd_req)
5188 {
5189 	/* User pulled some data, do we need a rwnd update? */
5190 	struct epoch_tracker et;
5191 	int r_unlocked = 0;
5192 	uint32_t dif, rwnd;
5193 	struct socket *so = NULL;
5194 
5195 	if (stcb == NULL)
5196 		return;
5197 
5198 	atomic_add_int(&stcb->asoc.refcnt, 1);
5199 
5200 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
5201 	    (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) {
5202 		/* Pre-check: if we are freeing, no update is needed */
5203 		goto no_lock;
5204 	}
5205 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5206 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5207 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5208 		goto out;
5209 	}
5210 	so = stcb->sctp_socket;
5211 	if (so == NULL) {
5212 		goto out;
5213 	}
5214 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5215 	/* Have you freed enough to warrant a look? */
5216 	*freed_so_far = 0;
5217 	/* Yep, it's worth a look and the lock overhead */
5218 
5219 	/* Figure out what the rwnd would be */
5220 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5221 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5222 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5223 	} else {
5224 		dif = 0;
5225 	}
5226 	if (dif >= rwnd_req) {
5227 		if (hold_rlock) {
5228 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5229 			r_unlocked = 1;
5230 		}
5231 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5232 			/*
5233 		 * One last check before we go in and take the TCB lock.
5234 		 * There is a race where the assoc may be marked about to be
5235 		 * freed after this check; we re-check under the lock below.
5236 			 */
5237 			goto out;
5238 		}
5239 		SCTP_TCB_LOCK(stcb);
5240 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5241 			/* No reports here */
5242 			SCTP_TCB_UNLOCK(stcb);
5243 			goto out;
5244 		}
5245 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5246 		NET_EPOCH_ENTER(et);
5247 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5248 
5249 		sctp_chunk_output(stcb->sctp_ep, stcb,
5250 		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5251 		/* make sure no timer is running */
5252 		NET_EPOCH_EXIT(et);
5253 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
5254 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5255 		SCTP_TCB_UNLOCK(stcb);
5256 	} else {
5257 		/* Update how much we have pending */
5258 		stcb->freed_by_sorcv_sincelast = dif;
5259 	}
5260 out:
5261 	if (so && r_unlocked && hold_rlock) {
5262 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5263 	}
5264 
5265 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5266 no_lock:
5267 	atomic_add_int(&stcb->asoc.refcnt, -1);
5268 	return;
5269 }
5270 
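/*
 * Common receive path for SCTP sockets: take the next sctp_queued_to_read
 * entry off the endpoint's read queue, copy its data to the uio (or hand
 * the mbuf chain back via *mp), fill in sinfo/from as requested, and call
 * sctp_user_rcvd() once enough receive space has been freed to make a
 * window-update SACK worthwhile.
 */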
5271 int
5272 sctp_sorecvmsg(struct socket *so,
5273     struct uio *uio,
5274     struct mbuf **mp,
5275     struct sockaddr *from,
5276     int fromlen,
5277     int *msg_flags,
5278     struct sctp_sndrcvinfo *sinfo,
5279     int filling_sinfo)
5280 {
5281 	/*
5282 	 * MSG flags we will look at: MSG_DONTWAIT - non-blocking I/O;
5283 	 * MSG_PEEK - look, don't touch (only valid with an mbuf copy out,
5284 	 * i.e. mp == NULL, so uio is the copy method to userland);
5285 	 * MSG_WAITALL - ??. On the way out we may set any combination
5286 	 * of: MSG_NOTIFICATION, MSG_EOR.
5287 	 *
5288 	 */
5289 	struct sctp_inpcb *inp = NULL;
5290 	ssize_t my_len = 0;
5291 	ssize_t cp_len = 0;
5292 	int error = 0;
5293 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5294 	struct mbuf *m = NULL;
5295 	struct sctp_tcb *stcb = NULL;
5296 	int wakeup_read_socket = 0;
5297 	int freecnt_applied = 0;
5298 	int out_flags = 0, in_flags = 0;
5299 	int block_allowed = 1;
5300 	uint32_t freed_so_far = 0;
5301 	ssize_t copied_so_far = 0;
5302 	int in_eeor_mode = 0;
5303 	int no_rcv_needed = 0;
5304 	uint32_t rwnd_req = 0;
5305 	int hold_sblock = 0;
5306 	int hold_rlock = 0;
5307 	ssize_t slen = 0;
5308 	uint32_t held_length = 0;
5309 	int sockbuf_lock = 0;
5310 
5311 	if (uio == NULL) {
5312 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5313 		return (EINVAL);
5314 	}
5315 
5316 	if (msg_flags) {
5317 		in_flags = *msg_flags;
5318 		if (in_flags & MSG_PEEK)
5319 			SCTP_STAT_INCR(sctps_read_peeks);
5320 	} else {
5321 		in_flags = 0;
5322 	}
5323 	slen = uio->uio_resid;
5324 
5325 	/* Pull in and set up our int flags */
5326 	if (in_flags & MSG_OOB) {
5327 		/* Out-of-band data is NOT supported */
5328 		return (EOPNOTSUPP);
5329 	}
5330 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5331 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5332 		return (EINVAL);
5333 	}
5334 	if ((in_flags & (MSG_DONTWAIT
5335 	    | MSG_NBIO
5336 	    )) ||
5337 	    SCTP_SO_IS_NBIO(so)) {
5338 		block_allowed = 0;
5339 	}
5340 	/* setup the endpoint */
5341 	inp = (struct sctp_inpcb *)so->so_pcb;
5342 	if (inp == NULL) {
5343 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5344 		return (EFAULT);
5345 	}
5346 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5347 	/* Must be at least an MTU's worth */
5348 	if (rwnd_req < SCTP_MIN_RWND)
5349 		rwnd_req = SCTP_MIN_RWND;
5350 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5351 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5352 		sctp_misc_ints(SCTP_SORECV_ENTER,
5353 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5354 	}
5355 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5356 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5357 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5358 	}
5359 
5360 
5361 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5362 	if (error) {
5363 		goto release_unlocked;
5364 	}
5365 	sockbuf_lock = 1;
5366 restart:
5367 
5368 
5369 restart_nosblocks:
5370 	if (hold_sblock == 0) {
5371 		SOCKBUF_LOCK(&so->so_rcv);
5372 		hold_sblock = 1;
5373 	}
5374 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5375 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5376 		goto out;
5377 	}
5378 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5379 		if (so->so_error) {
5380 			error = so->so_error;
5381 			if ((in_flags & MSG_PEEK) == 0)
5382 				so->so_error = 0;
5383 			goto out;
5384 		} else {
5385 			if (so->so_rcv.sb_cc == 0) {
5386 				/* indicate EOF */
5387 				error = 0;
5388 				goto out;
5389 			}
5390 		}
5391 	}
5392 	if (so->so_rcv.sb_cc <= held_length) {
5393 		if (so->so_error) {
5394 			error = so->so_error;
5395 			if ((in_flags & MSG_PEEK) == 0) {
5396 				so->so_error = 0;
5397 			}
5398 			goto out;
5399 		}
5400 		if ((so->so_rcv.sb_cc == 0) &&
5401 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5402 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5403 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5404 				/*
5405 				 * For the active open side, clear flags
5406 				 * for re-use; passive open is blocked
5407 				 * by connect.
5408 				 */
5409 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5410 					/*
5411 					 * You were aborted, passive side
5412 					 * always hits here
5413 					 */
5414 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5415 					error = ECONNRESET;
5416 				}
5417 				so->so_state &= ~(SS_ISCONNECTING |
5418 				    SS_ISDISCONNECTING |
5419 				    SS_ISCONFIRMING |
5420 				    SS_ISCONNECTED);
5421 				if (error == 0) {
5422 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5423 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5424 						error = ENOTCONN;
5425 					}
5426 				}
5427 				goto out;
5428 			}
5429 		}
5430 		if (block_allowed) {
5431 			error = sbwait(&so->so_rcv);
5432 			if (error) {
5433 				goto out;
5434 			}
5435 			held_length = 0;
5436 			goto restart_nosblocks;
5437 		} else {
5438 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5439 			error = EWOULDBLOCK;
5440 			goto out;
5441 		}
5442 	}
5443 	if (hold_sblock == 1) {
5444 		SOCKBUF_UNLOCK(&so->so_rcv);
5445 		hold_sblock = 0;
5446 	}
5447 	/* we possibly have data we can read */
5448 	/* sa_ignore FREED_MEMORY */
5449 	control = TAILQ_FIRST(&inp->read_queue);
5450 	if (control == NULL) {
5451 		/*
5452 		 * This could be happening since the appender did the
5453 		 * increment but has not yet done the tailq insert onto
5454 		 * the read_queue.
5455 		 */
5456 		if (hold_rlock == 0) {
5457 			SCTP_INP_READ_LOCK(inp);
5458 		}
5459 		control = TAILQ_FIRST(&inp->read_queue);
5460 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5461 #ifdef INVARIANTS
5462 			panic("Huh, its non zero and nothing on control?");
5463 #endif
5464 			so->so_rcv.sb_cc = 0;
5465 		}
5466 		SCTP_INP_READ_UNLOCK(inp);
5467 		hold_rlock = 0;
5468 		goto restart;
5469 	}
5470 
5471 	if ((control->length == 0) &&
5472 	    (control->do_not_ref_stcb)) {
5473 		/*
5474 		 * Clean-up code for freeing an assoc that left behind a
5475 		 * pdapi.. maybe a peer in EEOR mode that just closed after
5476 		 * sending and never indicated an EOR.
5477 		 */
5478 		if (hold_rlock == 0) {
5479 			hold_rlock = 1;
5480 			SCTP_INP_READ_LOCK(inp);
5481 		}
5482 		control->held_length = 0;
5483 		if (control->data) {
5484 			/* Hmm there is data here .. fix */
5485 			struct mbuf *m_tmp;
5486 			int cnt = 0;
5487 
5488 			m_tmp = control->data;
5489 			while (m_tmp) {
5490 				cnt += SCTP_BUF_LEN(m_tmp);
5491 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5492 					control->tail_mbuf = m_tmp;
5493 					control->end_added = 1;
5494 				}
5495 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5496 			}
5497 			control->length = cnt;
5498 		} else {
5499 			/* remove it */
5500 			TAILQ_REMOVE(&inp->read_queue, control, next);
5501 			/* Add back any hidden data */
5502 			sctp_free_remote_addr(control->whoFrom);
5503 			sctp_free_a_readq(stcb, control);
5504 		}
5505 		if (hold_rlock) {
5506 			hold_rlock = 0;
5507 			SCTP_INP_READ_UNLOCK(inp);
5508 		}
5509 		goto restart;
5510 	}
5511 	if ((control->length == 0) &&
5512 	    (control->end_added == 1)) {
5513 		/*
5514 		 * Do we also need to check for (control->pdapi_aborted ==
5515 		 * 1)?
5516 		 */
5517 		if (hold_rlock == 0) {
5518 			hold_rlock = 1;
5519 			SCTP_INP_READ_LOCK(inp);
5520 		}
5521 		TAILQ_REMOVE(&inp->read_queue, control, next);
5522 		if (control->data) {
5523 #ifdef INVARIANTS
5524 			panic("control->data not null but control->length == 0");
5525 #else
5526 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5527 			sctp_m_freem(control->data);
5528 			control->data = NULL;
5529 #endif
5530 		}
5531 		if (control->aux_data) {
5532 			sctp_m_free(control->aux_data);
5533 			control->aux_data = NULL;
5534 		}
5535 #ifdef INVARIANTS
5536 		if (control->on_strm_q) {
5537 			panic("About to free ctl:%p so:%p and its in %d",
5538 			    control, so, control->on_strm_q);
5539 		}
5540 #endif
5541 		sctp_free_remote_addr(control->whoFrom);
5542 		sctp_free_a_readq(stcb, control);
5543 		if (hold_rlock) {
5544 			hold_rlock = 0;
5545 			SCTP_INP_READ_UNLOCK(inp);
5546 		}
5547 		goto restart;
5548 	}
5549 	if (control->length == 0) {
5550 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5551 		    (filling_sinfo)) {
5552 			/* find a more suitable one than this */
5553 			ctl = TAILQ_NEXT(control, next);
5554 			while (ctl) {
5555 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5556 				    (ctl->some_taken ||
5557 				    (ctl->spec_flags & M_NOTIFICATION) ||
5558 				    ((ctl->do_not_ref_stcb == 0) &&
5559 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5560 				    ) {
5561 					/*-
5562 					 * If we have a different TCB next and there is data
5563 					 * present, and we have already taken some (pdapi) OR we can
5564 					 * ref the tcb and no delivery has started on this stream, we
5565 					 * take it. Note we allow a notification on a different
5566 					 * assoc to be delivered.
5567 					 */
5568 					control = ctl;
5569 					goto found_one;
5570 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5571 					    (ctl->length) &&
5572 					    ((ctl->some_taken) ||
5573 					    ((ctl->do_not_ref_stcb == 0) &&
5574 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5575 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5576 					/*-
5577 					 * If we have the same tcb, there is data present, and we
5578 					 * have the strm interleave feature present, then if we have
5579 					 * taken some (pdapi) or we can refer to that tcb AND we have
5580 					 * not started a delivery for this stream, we can take it.
5581 					 * Note we do NOT allow a notification on the same assoc to
5582 					 * be delivered.
5583 					 */
5584 					control = ctl;
5585 					goto found_one;
5586 				}
5587 				ctl = TAILQ_NEXT(ctl, next);
5588 			}
5589 		}
5590 		/*
5591 		 * If we reach here, no suitable replacement is available
5592 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5593 		 * into our held count, and it's time to sleep again.
5594 		 */
5595 		held_length = so->so_rcv.sb_cc;
5596 		control->held_length = so->so_rcv.sb_cc;
5597 		goto restart;
5598 	}
5599 	/* Clear the held length since there is something to read */
5600 	control->held_length = 0;
5601 found_one:
5602 	/*
5603 	 * If we reach here, control has some data for us to read off.
5604 	 * Note that stcb COULD be NULL.
5605 	 */
5606 	if (hold_rlock == 0) {
5607 		hold_rlock = 1;
5608 		SCTP_INP_READ_LOCK(inp);
5609 	}
5610 	control->some_taken++;
5611 	stcb = control->stcb;
5612 	if (stcb) {
5613 		if ((control->do_not_ref_stcb == 0) &&
5614 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5615 			if (freecnt_applied == 0)
5616 				stcb = NULL;
5617 		} else if (control->do_not_ref_stcb == 0) {
5618 			/* you can't free it on me please */
5619 			/*
5620 			 * The lock on the socket buffer protects us so the
5621 			 * free code will stop. But since we used the
5622 			 * socketbuf lock and the sender uses the tcb_lock
5623 			 * to increment, we need to use the atomic add to
5624 			 * the refcnt
5625 			 */
5626 			if (freecnt_applied) {
5627 #ifdef INVARIANTS
5628 				panic("refcnt already incremented");
5629 #else
5630 				SCTP_PRINTF("refcnt already incremented?\n");
5631 #endif
5632 			} else {
5633 				atomic_add_int(&stcb->asoc.refcnt, 1);
5634 				freecnt_applied = 1;
5635 			}
5636 			/*
5637 			 * Setup to remember how much we have not yet told
5638 			 * the peer our rwnd has opened up. Note we grab the
5639 			 * value from the tcb from last time. Note too that
5640 			 * sack sending clears this when a sack is sent,
5641 			 * which is fine. Once we hit the rwnd_req, we then
5642 			 * will go to the sctp_user_rcvd() that will not
5643 			 * lock until it KNOWs it MUST send a WUP-SACK.
5644 			 */
5645 			freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast;
5646 			stcb->freed_by_sorcv_sincelast = 0;
5647 		}
5648 	}
5649 	if (stcb &&
5650 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5651 	    control->do_not_ref_stcb == 0) {
5652 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5653 	}
5654 
5655 	/* First lets get off the sinfo and sockaddr info */
5656 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5657 		sinfo->sinfo_stream = control->sinfo_stream;
5658 		sinfo->sinfo_ssn = (uint16_t)control->mid;
5659 		sinfo->sinfo_flags = control->sinfo_flags;
5660 		sinfo->sinfo_ppid = control->sinfo_ppid;
5661 		sinfo->sinfo_context = control->sinfo_context;
5662 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5663 		sinfo->sinfo_tsn = control->sinfo_tsn;
5664 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5665 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5666 		nxt = TAILQ_NEXT(control, next);
5667 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5668 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5669 			struct sctp_extrcvinfo *s_extra;
5670 
5671 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5672 			if ((nxt) &&
5673 			    (nxt->length)) {
5674 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5675 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5676 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5677 				}
5678 				if (nxt->spec_flags & M_NOTIFICATION) {
5679 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5680 				}
5681 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5682 				s_extra->serinfo_next_length = nxt->length;
5683 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5684 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5685 				if (nxt->tail_mbuf != NULL) {
5686 					if (nxt->end_added) {
5687 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5688 					}
5689 				}
5690 			} else {
5691 				/*
5692 				 * we explicitly 0 this, since the memcpy
5693 				 * got some other things beyond the older
5694 				 * sinfo_ that is on the control's structure
5695 				 * :-D
5696 				 */
5697 				nxt = NULL;
5698 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5699 				s_extra->serinfo_next_aid = 0;
5700 				s_extra->serinfo_next_length = 0;
5701 				s_extra->serinfo_next_ppid = 0;
5702 				s_extra->serinfo_next_stream = 0;
5703 			}
5704 		}
5705 		/*
5706 		 * update off the real current cum-ack, if we have an stcb.
5707 		 */
5708 		if ((control->do_not_ref_stcb == 0) && stcb)
5709 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5710 		/*
5711 		 * mask off the high bits, we keep the actual chunk bits in
5712 		 * there.
5713 		 */
5714 		sinfo->sinfo_flags &= 0x00ff;
5715 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5716 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5717 		}
5718 	}
5719 #ifdef SCTP_ASOCLOG_OF_TSNS
5720 	{
5721 		int index, newindex;
5722 		struct sctp_pcbtsn_rlog *entry;
5723 
5724 		do {
5725 			index = inp->readlog_index;
5726 			newindex = index + 1;
5727 			if (newindex >= SCTP_READ_LOG_SIZE) {
5728 				newindex = 0;
5729 			}
5730 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5731 		entry = &inp->readlog[index];
5732 		entry->vtag = control->sinfo_assoc_id;
5733 		entry->strm = control->sinfo_stream;
5734 		entry->seq = (uint16_t)control->mid;
5735 		entry->sz = control->length;
5736 		entry->flgs = control->sinfo_flags;
5737 	}
5738 #endif
5739 	if ((fromlen > 0) && (from != NULL)) {
5740 		union sctp_sockstore store;
5741 		size_t len;
5742 
5743 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5744 #ifdef INET6
5745 		case AF_INET6:
5746 			len = sizeof(struct sockaddr_in6);
5747 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5748 			store.sin6.sin6_port = control->port_from;
5749 			break;
5750 #endif
5751 #ifdef INET
5752 		case AF_INET:
5753 #ifdef INET6
5754 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5755 				len = sizeof(struct sockaddr_in6);
5756 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5757 				    &store.sin6);
5758 				store.sin6.sin6_port = control->port_from;
5759 			} else {
5760 				len = sizeof(struct sockaddr_in);
5761 				store.sin = control->whoFrom->ro._l_addr.sin;
5762 				store.sin.sin_port = control->port_from;
5763 			}
5764 #else
5765 			len = sizeof(struct sockaddr_in);
5766 			store.sin = control->whoFrom->ro._l_addr.sin;
5767 			store.sin.sin_port = control->port_from;
5768 #endif
5769 			break;
5770 #endif
5771 		default:
5772 			len = 0;
5773 			break;
5774 		}
5775 		memcpy(from, &store, min((size_t)fromlen, len));
5776 #ifdef INET6
5777 		{
5778 			struct sockaddr_in6 lsa6, *from6;
5779 
5780 			from6 = (struct sockaddr_in6 *)from;
5781 			sctp_recover_scope_mac(from6, (&lsa6));
5782 		}
5783 #endif
5784 	}
5785 	if (hold_rlock) {
5786 		SCTP_INP_READ_UNLOCK(inp);
5787 		hold_rlock = 0;
5788 	}
5789 	if (hold_sblock) {
5790 		SOCKBUF_UNLOCK(&so->so_rcv);
5791 		hold_sblock = 0;
5792 	}
5793 	/* now copy out what data we can */
5794 	if (mp == NULL) {
5795 		/* copy out each mbuf in the chain up to length */
5796 get_more_data:
5797 		m = control->data;
5798 		while (m) {
5799 			/* Move out all we can */
5800 			cp_len = uio->uio_resid;
5801 			my_len = SCTP_BUF_LEN(m);
5802 			if (cp_len > my_len) {
5803 				/* not enough in this buf */
5804 				cp_len = my_len;
5805 			}
5806 			if (hold_rlock) {
5807 				SCTP_INP_READ_UNLOCK(inp);
5808 				hold_rlock = 0;
5809 			}
5810 			if (cp_len > 0)
5811 				error = uiomove(mtod(m, char *), (int)cp_len, uio);
5812 			/* re-read */
5813 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5814 				goto release;
5815 			}
5816 
5817 			if ((control->do_not_ref_stcb == 0) && stcb &&
5818 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5819 				no_rcv_needed = 1;
5820 			}
5821 			if (error) {
5822 				/* error we are out of here */
5823 				goto release;
5824 			}
5825 			SCTP_INP_READ_LOCK(inp);
5826 			hold_rlock = 1;
5827 			if (cp_len == SCTP_BUF_LEN(m)) {
5828 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5829 				    (control->end_added)) {
5830 					out_flags |= MSG_EOR;
5831 					if ((control->do_not_ref_stcb == 0) &&
5832 					    (control->stcb != NULL) &&
5833 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5834 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5835 				}
5836 				if (control->spec_flags & M_NOTIFICATION) {
5837 					out_flags |= MSG_NOTIFICATION;
5838 				}
5839 				/* we ate up the mbuf */
5840 				if (in_flags & MSG_PEEK) {
5841 					/* just looking */
5842 					m = SCTP_BUF_NEXT(m);
5843 					copied_so_far += cp_len;
5844 				} else {
5845 					/* dispose of the mbuf */
5846 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5847 						sctp_sblog(&so->so_rcv,
5848 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5849 					}
5850 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5851 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5852 						sctp_sblog(&so->so_rcv,
5853 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5854 					}
5855 					copied_so_far += cp_len;
5856 					freed_so_far += (uint32_t)cp_len;
5857 					freed_so_far += MSIZE;
5858 					atomic_subtract_int(&control->length, cp_len);
5859 					control->data = sctp_m_free(m);
5860 					m = control->data;
5861 					/*
5862 					 * Been through it all; we must hold the
5863 					 * sb lock, so it is OK to null the tail.
5864 					 */
5865 					if (control->data == NULL) {
5866 #ifdef INVARIANTS
5867 						if ((control->end_added == 0) ||
5868 						    (TAILQ_NEXT(control, next) == NULL)) {
5869 							/*
5870 							 * If the end is not
5871 							 * added, OR the
5872 							 * next is NOT null
5873 							 * we MUST have the
5874 							 * lock.
5875 							 */
5876 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5877 								panic("Hmm we don't own the lock?");
5878 							}
5879 						}
5880 #endif
5881 						control->tail_mbuf = NULL;
5882 #ifdef INVARIANTS
5883 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5884 							panic("end_added, nothing left and no MSG_EOR");
5885 						}
5886 #endif
5887 					}
5888 				}
5889 			} else {
5890 				/* Do we need to trim the mbuf? */
5891 				if (control->spec_flags & M_NOTIFICATION) {
5892 					out_flags |= MSG_NOTIFICATION;
5893 				}
5894 				if ((in_flags & MSG_PEEK) == 0) {
5895 					SCTP_BUF_RESV_UF(m, cp_len);
5896 					SCTP_BUF_LEN(m) -= (int)cp_len;
5897 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5898 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len);
5899 					}
5900 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5901 					if ((control->do_not_ref_stcb == 0) &&
5902 					    stcb) {
5903 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5904 					}
5905 					copied_so_far += cp_len;
5906 					freed_so_far += (uint32_t)cp_len;
5907 					freed_so_far += MSIZE;
5908 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5909 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5910 						    SCTP_LOG_SBRESULT, 0);
5911 					}
5912 					atomic_subtract_int(&control->length, cp_len);
5913 				} else {
5914 					copied_so_far += cp_len;
5915 				}
5916 			}
5917 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5918 				break;
5919 			}
5920 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5921 			    (control->do_not_ref_stcb == 0) &&
5922 			    (freed_so_far >= rwnd_req)) {
5923 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5924 			}
5925 		}		/* end while(m) */
5926 		/*
5927 		 * At this point we have looked at it all and we either have
5928 		 * a MSG_EOR, or have read all the user wants... <OR>
5929 		 * control->length == 0.
5930 		 */
5931 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5932 			/* we are done with this control */
5933 			if (control->length == 0) {
5934 				if (control->data) {
5935 #ifdef INVARIANTS
5936 					panic("control->data not null at read eor?");
5937 #else
5938 					SCTP_PRINTF("Strange, data left in the control buffer .. invariants would panic?\n");
5939 					sctp_m_freem(control->data);
5940 					control->data = NULL;
5941 #endif
5942 				}
5943 		done_with_control:
5944 				if (hold_rlock == 0) {
5945 					SCTP_INP_READ_LOCK(inp);
5946 					hold_rlock = 1;
5947 				}
5948 				TAILQ_REMOVE(&inp->read_queue, control, next);
5949 				/* Add back any hidden data */
5950 				if (control->held_length) {
5951 					held_length = 0;
5952 					control->held_length = 0;
5953 					wakeup_read_socket = 1;
5954 				}
5955 				if (control->aux_data) {
5956 					sctp_m_free(control->aux_data);
5957 					control->aux_data = NULL;
5958 				}
5959 				no_rcv_needed = control->do_not_ref_stcb;
5960 				sctp_free_remote_addr(control->whoFrom);
5961 				control->data = NULL;
5962 #ifdef INVARIANTS
5963 				if (control->on_strm_q) {
5964 					panic("About to free ctl:%p so:%p and its in %d",
5965 					    control, so, control->on_strm_q);
5966 				}
5967 #endif
5968 				sctp_free_a_readq(stcb, control);
5969 				control = NULL;
5970 				if ((freed_so_far >= rwnd_req) &&
5971 				    (no_rcv_needed == 0))
5972 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5973 
5974 			} else {
5975 				/*
5976 				 * The user did not read all of this
5977 				 * message, turn off the returned MSG_EOR
5978 				 * since we are leaving more behind on the
5979 				 * control to read.
5980 				 */
5981 #ifdef INVARIANTS
5982 				if (control->end_added &&
5983 				    (control->data == NULL) &&
5984 				    (control->tail_mbuf == NULL)) {
5985 					panic("Gak, control->length is corrupt?");
5986 				}
5987 #endif
5988 				no_rcv_needed = control->do_not_ref_stcb;
5989 				out_flags &= ~MSG_EOR;
5990 			}
5991 		}
5992 		if (out_flags & MSG_EOR) {
5993 			goto release;
5994 		}
5995 		if ((uio->uio_resid == 0) ||
5996 		    ((in_eeor_mode) &&
5997 		    (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) {
5998 			goto release;
5999 		}
6000 		/*
6001 		 * If we hit here, the receiver wants more and this message is
6002 		 * NOT done (pd-api). So two questions: can we block? If not,
6003 		 * we are done. Did the user NOT set MSG_WAITALL?
6004 		 */
6005 		if (block_allowed == 0) {
6006 			goto release;
6007 		}
6008 		/*
6009 		 * We need to wait for more data. A few things: - We don't
6010 		 * sbunlock() so we don't get someone else reading. - We
6011 		 * must be sure to account for the case where what is added
6012 		 * is NOT to our control when we wake up.
6013 		 */
6014 
6015 		/*
6016 		 * Do we need to tell the transport a rwnd update might be
6017 		 * needed before we go to sleep?
6018 		 */
6019 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6020 		    ((freed_so_far >= rwnd_req) &&
6021 		    (control->do_not_ref_stcb == 0) &&
6022 		    (no_rcv_needed == 0))) {
6023 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6024 		}
6025 wait_some_more:
6026 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
6027 			goto release;
6028 		}
6029 
6030 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
6031 			goto release;
6032 
6033 		if (hold_rlock == 1) {
6034 			SCTP_INP_READ_UNLOCK(inp);
6035 			hold_rlock = 0;
6036 		}
6037 		if (hold_sblock == 0) {
6038 			SOCKBUF_LOCK(&so->so_rcv);
6039 			hold_sblock = 1;
6040 		}
6041 		if ((copied_so_far) && (control->length == 0) &&
6042 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
6043 			goto release;
6044 		}
6045 		if (so->so_rcv.sb_cc <= control->held_length) {
6046 			error = sbwait(&so->so_rcv);
6047 			if (error) {
6048 				goto release;
6049 			}
6050 			control->held_length = 0;
6051 		}
6052 		if (hold_sblock) {
6053 			SOCKBUF_UNLOCK(&so->so_rcv);
6054 			hold_sblock = 0;
6055 		}
6056 		if (control->length == 0) {
6057 			/* still nothing here */
6058 			if (control->end_added == 1) {
6059 			/* he aborted, or is done, i.e., did a shutdown */
6060 				out_flags |= MSG_EOR;
6061 				if (control->pdapi_aborted) {
6062 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6063 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6064 
6065 					out_flags |= MSG_TRUNC;
6066 				} else {
6067 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6068 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6069 				}
6070 				goto done_with_control;
6071 			}
6072 			if (so->so_rcv.sb_cc > held_length) {
6073 				control->held_length = so->so_rcv.sb_cc;
6074 				held_length = 0;
6075 			}
6076 			goto wait_some_more;
6077 		} else if (control->data == NULL) {
6078 			/*
6079 			 * we must re-sync since data is probably being
6080 			 * added
6081 			 */
6082 			SCTP_INP_READ_LOCK(inp);
6083 			if ((control->length > 0) && (control->data == NULL)) {
6084 				/*
6085 				 * big trouble.. we have the lock and it's
6086 				 * corrupt?
6087 				 */
6088 #ifdef INVARIANTS
6089 				panic("Impossible data==NULL length !=0");
6090 #endif
6091 				out_flags |= MSG_EOR;
6092 				out_flags |= MSG_TRUNC;
6093 				control->length = 0;
6094 				SCTP_INP_READ_UNLOCK(inp);
6095 				goto done_with_control;
6096 			}
6097 			SCTP_INP_READ_UNLOCK(inp);
6098 			/* We will fall around to get more data */
6099 		}
6100 		goto get_more_data;
6101 	} else {
6102 		/*-
6103 		 * Give caller back the mbuf chain,
6104 		 * store in uio_resid the length
6105 		 */
6106 		wakeup_read_socket = 0;
6107 		if ((control->end_added == 0) ||
6108 		    (TAILQ_NEXT(control, next) == NULL)) {
6109 			/* Need to get rlock */
6110 			if (hold_rlock == 0) {
6111 				SCTP_INP_READ_LOCK(inp);
6112 				hold_rlock = 1;
6113 			}
6114 		}
6115 		if (control->end_added) {
6116 			out_flags |= MSG_EOR;
6117 			if ((control->do_not_ref_stcb == 0) &&
6118 			    (control->stcb != NULL) &&
6119 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6120 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6121 		}
6122 		if (control->spec_flags & M_NOTIFICATION) {
6123 			out_flags |= MSG_NOTIFICATION;
6124 		}
6125 		uio->uio_resid = control->length;
6126 		*mp = control->data;
6127 		m = control->data;
6128 		while (m) {
6129 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6130 				sctp_sblog(&so->so_rcv,
6131 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6132 			}
6133 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6134 			freed_so_far += (uint32_t)SCTP_BUF_LEN(m);
6135 			freed_so_far += MSIZE;
6136 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6137 				sctp_sblog(&so->so_rcv,
6138 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6139 			}
6140 			m = SCTP_BUF_NEXT(m);
6141 		}
6142 		control->data = control->tail_mbuf = NULL;
6143 		control->length = 0;
6144 		if (out_flags & MSG_EOR) {
6145 			/* Done with this control */
6146 			goto done_with_control;
6147 		}
6148 	}
6149 release:
6150 	if (hold_rlock == 1) {
6151 		SCTP_INP_READ_UNLOCK(inp);
6152 		hold_rlock = 0;
6153 	}
6154 	if (hold_sblock == 1) {
6155 		SOCKBUF_UNLOCK(&so->so_rcv);
6156 		hold_sblock = 0;
6157 	}
6158 
6159 	sbunlock(&so->so_rcv);
6160 	sockbuf_lock = 0;
6161 
6162 release_unlocked:
6163 	if (hold_sblock) {
6164 		SOCKBUF_UNLOCK(&so->so_rcv);
6165 		hold_sblock = 0;
6166 	}
6167 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6168 		if ((freed_so_far >= rwnd_req) &&
6169 		    (control && (control->do_not_ref_stcb == 0)) &&
6170 		    (no_rcv_needed == 0))
6171 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6172 	}
6173 out:
6174 	if (msg_flags) {
6175 		*msg_flags = out_flags;
6176 	}
6177 	if (((out_flags & MSG_EOR) == 0) &&
6178 	    ((in_flags & MSG_PEEK) == 0) &&
6179 	    (sinfo) &&
6180 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6181 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6182 		struct sctp_extrcvinfo *s_extra;
6183 
6184 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6185 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6186 	}
6187 	if (hold_rlock == 1) {
6188 		SCTP_INP_READ_UNLOCK(inp);
6189 	}
6190 	if (hold_sblock) {
6191 		SOCKBUF_UNLOCK(&so->so_rcv);
6192 	}
6193 	if (sockbuf_lock) {
6194 		sbunlock(&so->so_rcv);
6195 	}
6196 
6197 	if (freecnt_applied) {
6198 		/*
6199 		 * The lock on the socket buffer protects us so the free
6200 		 * code will stop. But since we used the socketbuf lock and
6201 		 * the sender uses the tcb_lock to increment, we need to use
6202 		 * the atomic add to the refcnt.
6203 		 */
6204 		if (stcb == NULL) {
6205 #ifdef INVARIANTS
6206 			panic("stcb for refcnt has gone NULL?");
6207 			goto stage_left;
6208 #else
6209 			goto stage_left;
6210 #endif
6211 		}
6212 		/* Save the value back for next time */
6213 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6214 		atomic_add_int(&stcb->asoc.refcnt, -1);
6215 	}
6216 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6217 		if (stcb) {
6218 			sctp_misc_ints(SCTP_SORECV_DONE,
6219 			    freed_so_far,
6220 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6221 			    stcb->asoc.my_rwnd,
6222 			    so->so_rcv.sb_cc);
6223 		} else {
6224 			sctp_misc_ints(SCTP_SORECV_DONE,
6225 			    freed_so_far,
6226 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6227 			    0,
6228 			    so->so_rcv.sb_cc);
6229 		}
6230 	}
6231 stage_left:
6232 	if (wakeup_read_socket) {
6233 		sctp_sorwakeup(inp, so);
6234 	}
6235 	return (error);
6236 }
6237 
6238 
6239 #ifdef SCTP_MBUF_LOGGING
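/*
 * Logging wrappers around m_free()/m_freem(), compiled in only when
 * SCTP_MBUF_LOGGING is defined; they record each freed mbuf (when mbuf
 * logging is enabled via the sysctl) before handing it to the allocator.
 */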
6240 struct mbuf *
6241 sctp_m_free(struct mbuf *m)
6242 {
6243 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6244 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6245 	}
6246 	return (m_free(m));
6247 }
6248 
6249 void
6250 sctp_m_freem(struct mbuf *mb)
6251 {
6252 	while (mb != NULL)
6253 		mb = sctp_m_free(mb);
6254 }
6255 
6256 #endif
6257 
6258 int
6259 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6260 {
6261 	/*
6262 	 * Given a local address, for all associations that hold the
6263 	 * address, request a peer-set-primary.
6264 	 */
6265 	struct sctp_ifa *ifa;
6266 	struct sctp_laddr *wi;
6267 
6268 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6269 	if (ifa == NULL) {
6270 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6271 		return (EADDRNOTAVAIL);
6272 	}
6273 	/*
6274 	 * Now that we have the ifa we must awaken the iterator with this
6275 	 * message.
6276 	 */
6277 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6278 	if (wi == NULL) {
6279 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6280 		return (ENOMEM);
6281 	}
6282 	/* Now incr the count and init the wi structure */
6283 	SCTP_INCR_LADDR_COUNT();
6284 	memset(wi, 0, sizeof(*wi));
6285 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6286 	wi->ifa = ifa;
6287 	wi->action = SCTP_SET_PRIM_ADDR;
6288 	atomic_add_int(&ifa->refcount, 1);
6289 
6290 	/* Now add it to the work queue */
6291 	SCTP_WQ_ADDR_LOCK();
6292 	/*
6293 	 * Should this really be a tailq? As it is we will process the
6294 	 * newest first :-0
6295 	 */
6296 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6297 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6298 	    (struct sctp_inpcb *)NULL,
6299 	    (struct sctp_tcb *)NULL,
6300 	    (struct sctp_nets *)NULL);
6301 	SCTP_WQ_ADDR_UNLOCK();
6302 	return (0);
6303 }
6304 
6305 
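/*
 * Socket-layer soreceive() entry point for SCTP: wraps sctp_sorecvmsg()
 * and, if requested, converts the returned sndrcvinfo into a control
 * message and duplicates the peer address for the caller.
 */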
6306 int
6307 sctp_soreceive(struct socket *so,
6308     struct sockaddr **psa,
6309     struct uio *uio,
6310     struct mbuf **mp0,
6311     struct mbuf **controlp,
6312     int *flagsp)
6313 {
6314 	int error, fromlen;
6315 	uint8_t sockbuf[256];
6316 	struct sockaddr *from;
6317 	struct sctp_extrcvinfo sinfo;
6318 	int filling_sinfo = 1;
6319 	int flags;
6320 	struct sctp_inpcb *inp;
6321 
6322 	inp = (struct sctp_inpcb *)so->so_pcb;
6323 	/* pickup the assoc we are reading from */
6324 	if (inp == NULL) {
6325 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6326 		return (EINVAL);
6327 	}
6328 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6329 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6330 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6331 	    (controlp == NULL)) {
6332 		/* user does not want the sndrcv ctl */
6333 		filling_sinfo = 0;
6334 	}
6335 	if (psa) {
6336 		from = (struct sockaddr *)sockbuf;
6337 		fromlen = sizeof(sockbuf);
6338 		from->sa_len = 0;
6339 	} else {
6340 		from = NULL;
6341 		fromlen = 0;
6342 	}
6343 
6344 	if (filling_sinfo) {
6345 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6346 	}
6347 	if (flagsp != NULL) {
6348 		flags = *flagsp;
6349 	} else {
6350 		flags = 0;
6351 	}
6352 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags,
6353 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6354 	if (flagsp != NULL) {
6355 		*flagsp = flags;
6356 	}
6357 	if (controlp != NULL) {
6358 		/* copy back the sinfo in a CMSG format */
6359 		if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) {
6360 			*controlp = sctp_build_ctl_nchunk(inp,
6361 			    (struct sctp_sndrcvinfo *)&sinfo);
6362 		} else {
6363 			*controlp = NULL;
6364 		}
6365 	}
6366 	if (psa) {
6367 		/* copy back the address info */
6368 		if (from && from->sa_len) {
6369 			*psa = sodupsockaddr(from, M_NOWAIT);
6370 		} else {
6371 			*psa = NULL;
6372 		}
6373 	}
6374 	return (error);
6375 }
6376
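/*
 * Add each address in a packed sctp_connectx() address array to the
 * association. On an invalid address, or if adding a remote address
 * fails, the association is freed, *error is set, and the count of
 * addresses added so far is returned.
 */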
6381 int
6382 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6383     int totaddr, int *error)
6384 {
6385 	int added = 0;
6386 	int i;
6387 	struct sctp_inpcb *inp;
6388 	struct sockaddr *sa;
6389 	size_t incr = 0;
6390 #ifdef INET
6391 	struct sockaddr_in *sin;
6392 #endif
6393 #ifdef INET6
6394 	struct sockaddr_in6 *sin6;
6395 #endif
6396 
6397 	sa = addr;
6398 	inp = stcb->sctp_ep;
6399 	*error = 0;
6400 	for (i = 0; i < totaddr; i++) {
6401 		switch (sa->sa_family) {
6402 #ifdef INET
6403 		case AF_INET:
6404 			incr = sizeof(struct sockaddr_in);
6405 			sin = (struct sockaddr_in *)sa;
6406 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6407 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6408 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6409 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6410 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6411 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
6412 				*error = EINVAL;
6413 				goto out_now;
6414 			}
6415 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6416 			    SCTP_DONOT_SETSCOPE,
6417 			    SCTP_ADDR_IS_CONFIRMED)) {
6418 				/* assoc gone; no unlock needed */
6419 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6420 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6421 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
6422 				*error = ENOBUFS;
6423 				goto out_now;
6424 			}
6425 			added++;
6426 			break;
6427 #endif
6428 #ifdef INET6
6429 		case AF_INET6:
6430 			incr = sizeof(struct sockaddr_in6);
6431 			sin6 = (struct sockaddr_in6 *)sa;
6432 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6433 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6434 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6435 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6436 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
6437 				*error = EINVAL;
6438 				goto out_now;
6439 			}
6440 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6441 			    SCTP_DONOT_SETSCOPE,
6442 			    SCTP_ADDR_IS_CONFIRMED)) {
6443 				/* assoc gone; no unlock needed */
6444 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6445 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6446 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
6447 				*error = ENOBUFS;
6448 				goto out_now;
6449 			}
6450 			added++;
6451 			break;
6452 #endif
6453 		default:
6454 			break;
6455 		}
6456 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6457 	}
6458 out_now:
6459 	return (added);
6460 }
6461 
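/*
 * Validate a packed sctp_connectx() address array: count the IPv4 and
 * IPv6 addresses, reject malformed, v4-mapped, or over-sized input, and
 * return EALREADY if any address already belongs to an association on
 * this endpoint.
 */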
6462 int
6463 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6464     unsigned int totaddr,
6465     unsigned int *num_v4, unsigned int *num_v6,
6466     unsigned int limit)
6467 {
6468 	struct sockaddr *sa;
6469 	struct sctp_tcb *stcb;
6470 	unsigned int incr, at, i;
6471 
6472 	at = 0;
6473 	sa = addr;
6474 	*num_v6 = *num_v4 = 0;
6475 	/* account and validate addresses */
6476 	if (totaddr == 0) {
6477 		return (EINVAL);
6478 	}
6479 	for (i = 0; i < totaddr; i++) {
6480 		if (at + sizeof(struct sockaddr) > limit) {
6481 			return (EINVAL);
6482 		}
6483 		switch (sa->sa_family) {
6484 #ifdef INET
6485 		case AF_INET:
6486 			incr = (unsigned int)sizeof(struct sockaddr_in);
6487 			if (sa->sa_len != incr) {
6488 				return (EINVAL);
6489 			}
6490 			(*num_v4) += 1;
6491 			break;
6492 #endif
6493 #ifdef INET6
6494 		case AF_INET6:
6495 			{
6496 				struct sockaddr_in6 *sin6;
6497 
6498 				sin6 = (struct sockaddr_in6 *)sa;
6499 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6500 					/* Must be non-mapped for connectx */
6501 					return (EINVAL);
6502 				}
6503 				incr = (unsigned int)sizeof(struct sockaddr_in6);
6504 				if (sa->sa_len != incr) {
6505 					return (EINVAL);
6506 				}
6507 				(*num_v6) += 1;
6508 				break;
6509 			}
6510 #endif
6511 		default:
6512 			return (EINVAL);
6513 		}
6514 		if ((at + incr) > limit) {
6515 			return (EINVAL);
6516 		}
6517 		SCTP_INP_INCR_REF(inp);
6518 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6519 		if (stcb != NULL) {
6520 			SCTP_TCB_UNLOCK(stcb);
6521 			return (EALREADY);
6522 		} else {
6523 			SCTP_INP_DECR_REF(inp);
6524 		}
6525 		at += incr;
6526 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6527 	}
6528 	return (0);
6529 }
6530 
6531 /*
6532  * sctp_bindx(ADD) for one address.
6533  * assumes all arguments are valid/checked by caller.
6534  */
6535 void
6536 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6537     struct sockaddr *sa, sctp_assoc_t assoc_id,
6538     uint32_t vrf_id, int *error, void *p)
6539 {
6540 	struct sockaddr *addr_touse;
6541 #if defined(INET) && defined(INET6)
6542 	struct sockaddr_in sin;
6543 #endif
6544 
6545 	/* see if we're bound all already! */
6546 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6547 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6548 		*error = EINVAL;
6549 		return;
6550 	}
6551 	addr_touse = sa;
6552 #ifdef INET6
6553 	if (sa->sa_family == AF_INET6) {
6554 #ifdef INET
6555 		struct sockaddr_in6 *sin6;
6556 
6557 #endif
6558 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6559 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6560 			*error = EINVAL;
6561 			return;
6562 		}
6563 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6564 			/* can only bind v6 on PF_INET6 sockets */
6565 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6566 			*error = EINVAL;
6567 			return;
6568 		}
6569 #ifdef INET
6570 		sin6 = (struct sockaddr_in6 *)addr_touse;
6571 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6572 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6573 			    SCTP_IPV6_V6ONLY(inp)) {
6574 				/* can't bind v4-mapped addrs on a v6-only socket */
6575 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6576 				*error = EINVAL;
6577 				return;
6578 			}
6579 			in6_sin6_2_sin(&sin, sin6);
6580 			addr_touse = (struct sockaddr *)&sin;
6581 		}
6582 #endif
6583 	}
6584 #endif
6585 #ifdef INET
6586 	if (sa->sa_family == AF_INET) {
6587 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6588 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6589 			*error = EINVAL;
6590 			return;
6591 		}
6592 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6593 		    SCTP_IPV6_V6ONLY(inp)) {
6594 			/* can't bind v4 addrs on a v6-only socket */
6595 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6596 			*error = EINVAL;
6597 			return;
6598 		}
6599 	}
6600 #endif
6601 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6602 		if (p == NULL) {
6603 			/* Can't get proc for Net/Open BSD */
6604 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6605 			*error = EINVAL;
6606 			return;
6607 		}
6608 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6609 		return;
6610 	}
6611 	/*
6612 	 * No locks required here since bind and mgmt_ep_sa all do their own
6613 	 * locking. If we do something for the FIX: below we may need to
6614 	 * lock in that case.
6615 	 */
6616 	if (assoc_id == 0) {
6617 		/* add the address */
6618 		struct sctp_inpcb *lep;
6619 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6620 
6621 		/* validate the incoming port */
6622 		if ((lsin->sin_port != 0) &&
6623 		    (lsin->sin_port != inp->sctp_lport)) {
6624 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6625 			*error = EINVAL;
6626 			return;
6627 		} else {
6628 			/* user specified 0 port, set it to existing port */
6629 			lsin->sin_port = inp->sctp_lport;
6630 		}
6631 
6632 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6633 		if (lep != NULL) {
6634 			/*
6635 			 * We must decrement the refcount since we have the
6636 			 * ep already and are binding. No remove going on
6637 			 * here.
6638 			 */
6639 			SCTP_INP_DECR_REF(lep);
6640 		}
6641 		if (lep == inp) {
6642 			/* already bound to it.. ok */
6643 			return;
6644 		} else if (lep == NULL) {
6645 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6646 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6647 			    SCTP_ADD_IP_ADDRESS,
6648 			    vrf_id, NULL);
6649 		} else {
6650 			*error = EADDRINUSE;
6651 		}
6652 		if (*error)
6653 			return;
6654 	} else {
6655 		/*
6656 		 * FIX: decide whether we allow assoc based bindx
6657 		 */
6658 	}
6659 }
6660 
6661 /*
6662  * sctp_bindx(DELETE) for one address.
6663  * assumes all arguments are valid/checked by caller.
6664  */
6665 void
6666 sctp_bindx_delete_address(struct sctp_inpcb *inp,
6667     struct sockaddr *sa, sctp_assoc_t assoc_id,
6668     uint32_t vrf_id, int *error)
6669 {
6670 	struct sockaddr *addr_touse;
6671 #if defined(INET) && defined(INET6)
6672 	struct sockaddr_in sin;
6673 #endif
6674 
6675 	/* see if we're bound all already! */
6676 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6677 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6678 		*error = EINVAL;
6679 		return;
6680 	}
6681 	addr_touse = sa;
6682 #ifdef INET6
6683 	if (sa->sa_family == AF_INET6) {
6684 #ifdef INET
6685 		struct sockaddr_in6 *sin6;
6686 #endif
6687 
6688 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6689 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6690 			*error = EINVAL;
6691 			return;
6692 		}
6693 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6694 			/* can only bind v6 on PF_INET6 sockets */
6695 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6696 			*error = EINVAL;
6697 			return;
6698 		}
6699 #ifdef INET
6700 		sin6 = (struct sockaddr_in6 *)addr_touse;
6701 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6702 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6703 			    SCTP_IPV6_V6ONLY(inp)) {
6704 				/* can't bind v4-mapped addrs on a v6-only socket */
6705 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6706 				*error = EINVAL;
6707 				return;
6708 			}
6709 			in6_sin6_2_sin(&sin, sin6);
6710 			addr_touse = (struct sockaddr *)&sin;
6711 		}
6712 #endif
6713 	}
6714 #endif
6715 #ifdef INET
6716 	if (sa->sa_family == AF_INET) {
6717 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6718 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6719 			*error = EINVAL;
6720 			return;
6721 		}
6722 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6723 		    SCTP_IPV6_V6ONLY(inp)) {
6724 			/* can't bind v4 addrs on a v6-only socket */
6725 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6726 			*error = EINVAL;
6727 			return;
6728 		}
6729 	}
6730 #endif
6731 	/*
6732 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6733 	 * below is ever changed we may need to lock before calling
6734 	 * association level binding.
6735 	 */
6736 	if (assoc_id == 0) {
6737 		/* delete the address */
6738 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6739 		    SCTP_DEL_IP_ADDRESS,
6740 		    vrf_id, NULL);
6741 	} else {
6742 		/*
6743 		 * FIX: decide whether we allow assoc based bindx
6744 		 */
6745 	}
6746 }
6747 
6748 /*
6749  * returns the valid local address count for an assoc, taking into account
6750  * all scoping rules
6751  */
6752 int
6753 sctp_local_addr_count(struct sctp_tcb *stcb)
6754 {
6755 	int loopback_scope;
6756 #if defined(INET)
6757 	int ipv4_local_scope, ipv4_addr_legal;
6758 #endif
6759 #if defined (INET6)
6760 	int local_scope, site_scope, ipv6_addr_legal;
6761 #endif
6762 	struct sctp_vrf *vrf;
6763 	struct sctp_ifn *sctp_ifn;
6764 	struct sctp_ifa *sctp_ifa;
6765 	int count = 0;
6766 
6767 	/* Turn on all the appropriate scopes */
6768 	loopback_scope = stcb->asoc.scope.loopback_scope;
6769 #if defined(INET)
6770 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
6771 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
6772 #endif
6773 #if defined(INET6)
6774 	local_scope = stcb->asoc.scope.local_scope;
6775 	site_scope = stcb->asoc.scope.site_scope;
6776 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
6777 #endif
6778 	SCTP_IPI_ADDR_RLOCK();
6779 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6780 	if (vrf == NULL) {
6781 		/* no vrf, no addresses */
6782 		SCTP_IPI_ADDR_RUNLOCK();
6783 		return (0);
6784 	}
6785 
6786 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6787 		/*
6788 		 * bound all case: go through all ifns on the vrf
6789 		 */
6790 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6791 			if ((loopback_scope == 0) &&
6792 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6793 				continue;
6794 			}
6795 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6796 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6797 					continue;
6798 				switch (sctp_ifa->address.sa.sa_family) {
6799 #ifdef INET
6800 				case AF_INET:
6801 					if (ipv4_addr_legal) {
6802 						struct sockaddr_in *sin;
6803 
6804 						sin = &sctp_ifa->address.sin;
6805 						if (sin->sin_addr.s_addr == 0) {
6806 							/*
6807 							 * skip unspecified
6808 							 * addrs
6809 							 */
6810 							continue;
6811 						}
6812 						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
6813 						    &sin->sin_addr) != 0) {
6814 							continue;
6815 						}
6816 						if ((ipv4_local_scope == 0) &&
6817 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6818 							continue;
6819 						}
6820 						/* count this one */
6821 						count++;
6822 					} else {
6823 						continue;
6824 					}
6825 					break;
6826 #endif
6827 #ifdef INET6
6828 				case AF_INET6:
6829 					if (ipv6_addr_legal) {
6830 						struct sockaddr_in6 *sin6;
6831 
6832 						sin6 = &sctp_ifa->address.sin6;
6833 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6834 							continue;
6835 						}
6836 						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
6837 						    &sin6->sin6_addr) != 0) {
6838 							continue;
6839 						}
6840 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6841 							if (local_scope == 0)
6842 								continue;
6843 							if (sin6->sin6_scope_id == 0) {
6844 								if (sa6_recoverscope(sin6) != 0)
6845 									/* bad link-local address */
6854 									continue;
6855 							}
6856 						}
6857 						if ((site_scope == 0) &&
6858 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6859 							continue;
6860 						}
6861 						/* count this one */
6862 						count++;
6863 					}
6864 					break;
6865 #endif
6866 				default:
6867 					/* TSNH */
6868 					break;
6869 				}
6870 			}
6871 		}
6872 	} else {
6873 		/*
6874 		 * subset bound case
6875 		 */
6876 		struct sctp_laddr *laddr;
6877 
6878 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6879 		    sctp_nxt_addr) {
6880 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6881 				continue;
6882 			}
6883 			/* count this one */
6884 			count++;
6885 		}
6886 	}
6887 	SCTP_IPI_ADDR_RUNLOCK();
6888 	return (count);
6889 }
6890 
6891 #if defined(SCTP_LOCAL_TRACE_BUF)
6892 
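/*
 * Append an entry to the circular local trace buffer. The slot index is
 * reserved with an atomic compare-and-swap so concurrent callers do not
 * collide.
 */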
6893 void
6894 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6895 {
6896 	uint32_t saveindex, newindex;
6897 
6898 	do {
6899 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6900 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6901 			newindex = 1;
6902 		} else {
6903 			newindex = saveindex + 1;
6904 		}
6905 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6906 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6907 		saveindex = 0;
6908 	}
6909 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6910 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6911 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6912 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6913 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6914 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6915 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6916 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6917 }
6918 
6919 #endif
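
/*
 * Input handler for UDP-encapsulated SCTP packets: strip the UDP header
 * from the received chain, clear any hardware checksum-valid flag that
 * would be misread as an SCTP checksum indication, and re-inject the
 * packet into the normal SCTP input path together with the UDP source
 * port.
 */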
6920 static void
6921 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
6922     const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
6923 {
6924 	struct ip *iph;
6925 #ifdef INET6
6926 	struct ip6_hdr *ip6;
6927 #endif
6928 	struct mbuf *sp, *last;
6929 	struct udphdr *uhdr;
6930 	uint16_t port;
6931 
6932 	if ((m->m_flags & M_PKTHDR) == 0) {
6933 		/* Can't handle one that is not a pkt hdr */
6934 		goto out;
6935 	}
6936 	/* Pull the src port */
6937 	iph = mtod(m, struct ip *);
6938 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6939 	port = uhdr->uh_sport;
6940 	/*
6941 	 * Split out the mbuf chain. Leave the IP header in m, place the
6942 	 * rest in the sp.
6943 	 */
6944 	sp = m_split(m, off, M_NOWAIT);
6945 	if (sp == NULL) {
6946 		/* Gak, drop packet, we can't do a split */
6947 		goto out;
6948 	}
6949 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
6950 		/* Gak, packet can't have an SCTP header in it - too small */
6951 		m_freem(sp);
6952 		goto out;
6953 	}
6954 	/* Now pull up the UDP header and SCTP header together */
6955 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
6956 	if (sp == NULL) {
6957 		/* Gak pullup failed */
6958 		goto out;
6959 	}
6960 	/* Trim out the UDP header */
6961 	m_adj(sp, sizeof(struct udphdr));
6962 
6963 	/* Now reconstruct the mbuf chain */
6964 	for (last = m; last->m_next; last = last->m_next);
6965 	last->m_next = sp;
6966 	m->m_pkthdr.len += sp->m_pkthdr.len;
6967 	/*
6968 	 * The CSUM_DATA_VALID flag indicates that the HW checked the UDP
6969 	 * checksum and it was valid. Since CSUM_DATA_VALID ==
6970 	 * CSUM_SCTP_VALID, this would imply that the HW also verified the
6971 	 * SCTP checksum. Therefore, clear the bit.
6972 	 */
6973 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6974 	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
6975 	    m->m_pkthdr.len,
6976 	    if_name(m->m_pkthdr.rcvif),
6977 	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
6978 	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
6979 	iph = mtod(m, struct ip *);
6980 	switch (iph->ip_v) {
6981 #ifdef INET
6982 	case IPVERSION:
6983 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
6984 		sctp_input_with_port(m, off, port);
6985 		break;
6986 #endif
6987 #ifdef INET6
6988 	case IPV6_VERSION >> 4:
6989 		ip6 = mtod(m, struct ip6_hdr *);
6990 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
6991 		sctp6_input_with_port(&m, &off, port);
6992 		break;
6993 #endif
6994 	default:
6995 		goto out;
6996 		break;
6997 	}
6998 	return;
6999 out:
7000 	m_freem(m);
7001 }
7002 
7003 #ifdef INET
7004 static void
7005 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
7006 {
7007 	struct ip *outer_ip, *inner_ip;
7008 	struct sctphdr *sh;
7009 	struct icmp *icmp;
7010 	struct udphdr *udp;
7011 	struct sctp_inpcb *inp;
7012 	struct sctp_tcb *stcb;
7013 	struct sctp_nets *net;
7014 	struct sctp_init_chunk *ch;
7015 	struct sockaddr_in src, dst;
7016 	uint8_t type, code;
7017 
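	/*
	 * 'vip' points at the copy of the inner (offending) IP header that
	 * is embedded in the ICMP message.  Walk backwards from it to
	 * recover the ICMP header and the outer IP header, then verify that
	 * the outer packet is long enough to also hold the 8 byte ICMP
	 * header, the inner IP header, the UDP header and the first 8 bytes
	 * (ports and verification tag) of the encapsulated SCTP common
	 * header.
	 */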
7018 	inner_ip = (struct ip *)vip;
7019 	icmp = (struct icmp *)((caddr_t)inner_ip -
7020 	    (sizeof(struct icmp) - sizeof(struct ip)));
7021 	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
7022 	if (ntohs(outer_ip->ip_len) <
7023 	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
7024 		return;
7025 	}
7026 	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
7027 	sh = (struct sctphdr *)(udp + 1);
7028 	memset(&src, 0, sizeof(struct sockaddr_in));
7029 	src.sin_family = AF_INET;
7030 	src.sin_len = sizeof(struct sockaddr_in);
7031 	src.sin_port = sh->src_port;
7032 	src.sin_addr = inner_ip->ip_src;
7033 	memset(&dst, 0, sizeof(struct sockaddr_in));
7034 	dst.sin_family = AF_INET;
7035 	dst.sin_len = sizeof(struct sockaddr_in);
7036 	dst.sin_port = sh->dest_port;
7037 	dst.sin_addr = inner_ip->ip_dst;
7038 	/*
7039 	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
7040 	 * holds our local endpoint address. Thus we reverse the dst and the
7041 	 * src in the lookup.
7042 	 */
7043 	inp = NULL;
7044 	net = NULL;
7045 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7046 	    (struct sockaddr *)&src,
7047 	    &inp, &net, 1,
7048 	    SCTP_DEFAULT_VRFID);
7049 	if ((stcb != NULL) &&
7050 	    (net != NULL) &&
7051 	    (inp != NULL)) {
7052 		/* Check the UDP port numbers */
7053 		if ((udp->uh_dport != net->port) ||
7054 		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7055 			SCTP_TCB_UNLOCK(stcb);
7056 			return;
7057 		}
7058 		/* Check the verification tag */
7059 		if (ntohl(sh->v_tag) != 0) {
7060 			/*
7061 			 * This must be the verification tag used for
7062 			 * sending out packets. We don't consider packets
7063 			 * reflecting the verification tag.
7064 			 */
7065 			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
7066 				SCTP_TCB_UNLOCK(stcb);
7067 				return;
7068 			}
7069 		} else {
7070 			if (ntohs(outer_ip->ip_len) >=
7071 			    sizeof(struct ip) +
7072 			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
7073 				/*
7074 				 * In this case we can check if we got an
7075 				 * INIT chunk and if the initiate tag
7076 				 * matches.
7077 				 */
7078 				ch = (struct sctp_init_chunk *)(sh + 1);
7079 				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
7080 				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
7081 					SCTP_TCB_UNLOCK(stcb);
7082 					return;
7083 				}
7084 			} else {
7085 				SCTP_TCB_UNLOCK(stcb);
7086 				return;
7087 			}
7088 		}
7089 		type = icmp->icmp_type;
7090 		code = icmp->icmp_code;
7091 		if ((type == ICMP_UNREACH) &&
7092 		    (code == ICMP_UNREACH_PORT)) {
7093 			code = ICMP_UNREACH_PROTOCOL;
7094 		}
7095 		sctp_notify(inp, stcb, net, type, code,
7096 		    ntohs(inner_ip->ip_len),
7097 		    (uint32_t)ntohs(icmp->icmp_nextmtu));
7098 	} else {
7099 		if ((stcb == NULL) && (inp != NULL)) {
7100 			/* reduce ref-count */
7101 			SCTP_INP_WLOCK(inp);
7102 			SCTP_INP_DECR_REF(inp);
7103 			SCTP_INP_WUNLOCK(inp);
7104 		}
7105 		if (stcb) {
7106 			SCTP_TCB_UNLOCK(stcb);
7107 		}
7108 	}
7109 	return;
7110 }
7111 #endif
7112 
7113 #ifdef INET6
7114 static void
7115 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
7116 {
7117 	struct ip6ctlparam *ip6cp;
7118 	struct sctp_inpcb *inp;
7119 	struct sctp_tcb *stcb;
7120 	struct sctp_nets *net;
7121 	struct sctphdr sh;
7122 	struct udphdr udp;
7123 	struct sockaddr_in6 src, dst;
7124 	uint8_t type, code;
7125 
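	/*
	 * Unlike the IPv4 handler above, the offending packet is still held
	 * in an mbuf chain (ip6c_m/ip6c_off), so the UDP header and the
	 * interesting parts of the SCTP common header (ports and
	 * verification tag) are pulled out with m_copydata() instead of
	 * being accessed through pointers.
	 */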
7126 	ip6cp = (struct ip6ctlparam *)d;
7127 	/*
7128 	 * XXX: When ip6c_ip6 is non-NULL, ip6c_m and ip6c_off are assumed valid.
7129 	 */
7130 	if (ip6cp->ip6c_m == NULL) {
7131 		return;
7132 	}
7133 	/*
7134 	 * Check if we can safely examine the ports and the verification tag
7135 	 * of the SCTP common header.
7136 	 */
7137 	if (ip6cp->ip6c_m->m_pkthdr.len <
7138 	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
7139 		return;
7140 	}
7141 	/* Copy out the UDP header. */
7142 	memset(&udp, 0, sizeof(struct udphdr));
7143 	m_copydata(ip6cp->ip6c_m,
7144 	    ip6cp->ip6c_off,
7145 	    sizeof(struct udphdr),
7146 	    (caddr_t)&udp);
7147 	/* Copy out the port numbers and the verification tag. */
7148 	memset(&sh, 0, sizeof(struct sctphdr));
7149 	m_copydata(ip6cp->ip6c_m,
7150 	    ip6cp->ip6c_off + sizeof(struct udphdr),
7151 	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
7152 	    (caddr_t)&sh);
7153 	memset(&src, 0, sizeof(struct sockaddr_in6));
7154 	src.sin6_family = AF_INET6;
7155 	src.sin6_len = sizeof(struct sockaddr_in6);
7156 	src.sin6_port = sh.src_port;
7157 	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
7158 	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7159 		return;
7160 	}
7161 	memset(&dst, 0, sizeof(struct sockaddr_in6));
7162 	dst.sin6_family = AF_INET6;
7163 	dst.sin6_len = sizeof(struct sockaddr_in6);
7164 	dst.sin6_port = sh.dest_port;
7165 	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
7166 	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7167 		return;
7168 	}
7169 	inp = NULL;
7170 	net = NULL;
7171 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7172 	    (struct sockaddr *)&src,
7173 	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
7174 	if ((stcb != NULL) &&
7175 	    (net != NULL) &&
7176 	    (inp != NULL)) {
7177 		/* Check the UDP port numbers */
7178 		if ((udp.uh_dport != net->port) ||
7179 		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7180 			SCTP_TCB_UNLOCK(stcb);
7181 			return;
7182 		}
7183 		/* Check the verification tag */
7184 		if (ntohl(sh.v_tag) != 0) {
7185 			/*
7186 			 * This must be the verification tag used for
7187 			 * sending out packets. We don't consider packets
7188 			 * reflecting the verification tag.
7189 			 */
7190 			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
7191 				SCTP_TCB_UNLOCK(stcb);
7192 				return;
7193 			}
7194 		} else {
7195 			if (ip6cp->ip6c_m->m_pkthdr.len >=
7196 			    ip6cp->ip6c_off + sizeof(struct udphdr) +
7197 			    sizeof(struct sctphdr) +
7198 			    sizeof(struct sctp_chunkhdr) +
7199 			    offsetof(struct sctp_init, a_rwnd)) {
7200 				/*
7201 				 * In this case we can check if we got an
7202 				 * INIT chunk and if the initiate tag
7203 				 * matches.
7204 				 */
7205 				uint32_t initiate_tag;
7206 				uint8_t chunk_type;
7207 
7208 				m_copydata(ip6cp->ip6c_m,
7209 				    ip6cp->ip6c_off +
7210 				    sizeof(struct udphdr) +
7211 				    sizeof(struct sctphdr),
7212 				    sizeof(uint8_t),
7213 				    (caddr_t)&chunk_type);
7214 				m_copydata(ip6cp->ip6c_m,
7215 				    ip6cp->ip6c_off +
7216 				    sizeof(struct udphdr) +
7217 				    sizeof(struct sctphdr) +
7218 				    sizeof(struct sctp_chunkhdr),
7219 				    sizeof(uint32_t),
7220 				    (caddr_t)&initiate_tag);
7221 				if ((chunk_type != SCTP_INITIATION) ||
7222 				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
7223 					SCTP_TCB_UNLOCK(stcb);
7224 					return;
7225 				}
7226 			} else {
7227 				SCTP_TCB_UNLOCK(stcb);
7228 				return;
7229 			}
7230 		}
7231 		type = ip6cp->ip6c_icmp6->icmp6_type;
7232 		code = ip6cp->ip6c_icmp6->icmp6_code;
7233 		if ((type == ICMP6_DST_UNREACH) &&
7234 		    (code == ICMP6_DST_UNREACH_NOPORT)) {
7235 			type = ICMP6_PARAM_PROB;
7236 			code = ICMP6_PARAMPROB_NEXTHEADER;
7237 		}
7238 		sctp6_notify(inp, stcb, net, type, code,
7239 		    ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
7240 	} else {
7241 		if ((stcb == NULL) && (inp != NULL)) {
7242 			/* reduce inp's ref-count */
7243 			SCTP_INP_WLOCK(inp);
7244 			SCTP_INP_DECR_REF(inp);
7245 			SCTP_INP_WUNLOCK(inp);
7246 		}
7247 		if (stcb) {
7248 			SCTP_TCB_UNLOCK(stcb);
7249 		}
7250 	}
7251 }
7252 #endif
7253 
7254 void
7255 sctp_over_udp_stop(void)
7256 {
7257 	/*
7258 	 * This function assumes that the sysctl caller holds
7259 	 * sctp_sysctl_info_lock() for writing.
7260 	 */
7261 #ifdef INET
7262 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7263 		soclose(SCTP_BASE_INFO(udp4_tun_socket));
7264 		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
7265 	}
7266 #endif
7267 #ifdef INET6
7268 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7269 		soclose(SCTP_BASE_INFO(udp6_tun_socket));
7270 		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
7271 	}
7272 #endif
7273 }
7274 
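/*
 * Create the kernel UDP sockets used to receive SCTP over UDP encapsulated
 * packets.  The port is taken from the sctp_udp_tunneling_port sysctl
 * variable; presumably this is exposed as net.inet.sctp.udp_tunneling_port
 * and its handler is the "sysctl caller" mentioned below, but neither is
 * declared in this file.
 */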
7275 int
7276 sctp_over_udp_start(void)
7277 {
7278 	uint16_t port;
7279 	int ret;
7280 #ifdef INET
7281 	struct sockaddr_in sin;
7282 #endif
7283 #ifdef INET6
7284 	struct sockaddr_in6 sin6;
7285 #endif
7286 	/*
7287 	 * This function assumes that the sysctl caller holds
7288 	 * sctp_sysctl_info_lock() for writing.
7289 	 */
7290 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7291 	if (ntohs(port) == 0) {
7292 		/* Must have a port set */
7293 		return (EINVAL);
7294 	}
7295 #ifdef INET
7296 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7297 		/* Already running -- must stop first */
7298 		return (EALREADY);
7299 	}
7300 #endif
7301 #ifdef INET6
7302 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7303 		/* Already running -- must stop first */
7304 		return (EALREADY);
7305 	}
7306 #endif
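	/*
	 * For each compiled-in address family the sequence below is the
	 * same: create a kernel-owned UDP socket, register the tunneling
	 * input and ICMP callbacks via udp_set_kernel_tunneling(), and
	 * finally bind the socket to the tunneling port on the wildcard
	 * address.
	 */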
7307 #ifdef INET
7308 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7309 	    SOCK_DGRAM, IPPROTO_UDP,
7310 	    curthread->td_ucred, curthread))) {
7311 		sctp_over_udp_stop();
7312 		return (ret);
7313 	}
7314 	/* Call the special UDP hook. */
7315 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7316 	    sctp_recv_udp_tunneled_packet,
7317 	    sctp_recv_icmp_tunneled_packet,
7318 	    NULL))) {
7319 		sctp_over_udp_stop();
7320 		return (ret);
7321 	}
7322 	/* Ok, we have a socket, bind it to the port. */
7323 	memset(&sin, 0, sizeof(struct sockaddr_in));
7324 	sin.sin_len = sizeof(struct sockaddr_in);
7325 	sin.sin_family = AF_INET;
7326 	sin.sin_port = htons(port);
7327 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7328 	    (struct sockaddr *)&sin, curthread))) {
7329 		sctp_over_udp_stop();
7330 		return (ret);
7331 	}
7332 #endif
7333 #ifdef INET6
7334 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7335 	    SOCK_DGRAM, IPPROTO_UDP,
7336 	    curthread->td_ucred, curthread))) {
7337 		sctp_over_udp_stop();
7338 		return (ret);
7339 	}
7340 	/* Call the special UDP hook. */
7341 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7342 	    sctp_recv_udp_tunneled_packet,
7343 	    sctp_recv_icmp6_tunneled_packet,
7344 	    NULL))) {
7345 		sctp_over_udp_stop();
7346 		return (ret);
7347 	}
7348 	/* Ok, we have a socket, bind it to the port. */
7349 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7350 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7351 	sin6.sin6_family = AF_INET6;
7352 	sin6.sin6_port = htons(port);
7353 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7354 	    (struct sockaddr *)&sin6, curthread))) {
7355 		sctp_over_udp_stop();
7356 		return (ret);
7357 	}
7358 #endif
7359 	return (0);
7360 }
7361 
7362 /*
7363  * sctp_min_mtu() returns the minimum of all non-zero arguments.
7364  * If all arguments are zero, zero is returned.
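 * For example, sctp_min_mtu(1500, 0, 1280) yields 1280, while
 * sctp_min_mtu(0, 0, 0) yields 0.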
7365  */
7366 uint32_t
7367 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
7368 {
7369 	if (mtu1 > 0) {
7370 		if (mtu2 > 0) {
7371 			if (mtu3 > 0) {
7372 				return (min(mtu1, min(mtu2, mtu3)));
7373 			} else {
7374 				return (min(mtu1, mtu2));
7375 			}
7376 		} else {
7377 			if (mtu3 > 0) {
7378 				return (min(mtu1, mtu3));
7379 			} else {
7380 				return (mtu1);
7381 			}
7382 		}
7383 	} else {
7384 		if (mtu2 > 0) {
7385 			if (mtu3 > 0) {
7386 				return (min(mtu2, mtu3));
7387 			} else {
7388 				return (mtu2);
7389 			}
7390 		} else {
7391 			return (mtu3);
7392 		}
7393 	}
7394 }
7395 
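/*
 * Path MTU information is shared with TCP by storing it in the TCP host
 * cache, keyed by the peer address and the FIB number.  sctp_hc_set_mtu()
 * records a discovered MTU and sctp_hc_get_mtu() retrieves a previously
 * cached value (zero meaning that nothing is cached for this peer).
 */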
7396 void
7397 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
7398 {
7399 	struct in_conninfo inc;
7400 
7401 	memset(&inc, 0, sizeof(struct in_conninfo));
7402 	inc.inc_fibnum = fibnum;
7403 	switch (addr->sa.sa_family) {
7404 #ifdef INET
7405 	case AF_INET:
7406 		inc.inc_faddr = addr->sin.sin_addr;
7407 		break;
7408 #endif
7409 #ifdef INET6
7410 	case AF_INET6:
7411 		inc.inc_flags |= INC_ISIPV6;
7412 		inc.inc6_faddr = addr->sin6.sin6_addr;
7413 		break;
7414 #endif
7415 	default:
7416 		return;
7417 	}
7418 	tcp_hc_updatemtu(&inc, (u_long)mtu);
7419 }
7420 
7421 uint32_t
7422 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
7423 {
7424 	struct in_conninfo inc;
7425 
7426 	memset(&inc, 0, sizeof(struct in_conninfo));
7427 	inc.inc_fibnum = fibnum;
7428 	switch (addr->sa.sa_family) {
7429 #ifdef INET
7430 	case AF_INET:
7431 		inc.inc_faddr = addr->sin.sin_addr;
7432 		break;
7433 #endif
7434 #ifdef INET6
7435 	case AF_INET6:
7436 		inc.inc_flags |= INC_ISIPV6;
7437 		inc.inc6_faddr = addr->sin6.sin6_addr;
7438 		break;
7439 #endif
7440 	default:
7441 		return (0);
7442 	}
7443 	return ((uint32_t)tcp_hc_getmtu(&inc));
7444 }
7445 
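/*
 * The association state word packs the primary state in the bits covered
 * by SCTP_STATE_MASK and keeps substate flags in the remaining bits.
 * sctp_set_state() replaces only the masked part, sctp_add_substate() only
 * ORs in flag bits; with KDTRACE_HOOKS both report selected transitions
 * through the state__change probe.
 */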
7446 void
7447 sctp_set_state(struct sctp_tcb *stcb, int new_state)
7448 {
7449 #if defined(KDTRACE_HOOKS)
7450 	int old_state = stcb->asoc.state;
7451 #endif
7452 
7453 	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
7454 	    ("sctp_set_state: Can't set substate (new_state = %x)",
7455 	    new_state));
7456 	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
7457 	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
7458 	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
7459 	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
7460 		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
7461 	}
7462 #if defined(KDTRACE_HOOKS)
7463 	if (((old_state & SCTP_STATE_MASK) != new_state) &&
7464 	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
7465 	    (new_state == SCTP_STATE_INUSE))) {
7466 		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
7467 	}
7468 #endif
7469 }
7470 
7471 void
7472 sctp_add_substate(struct sctp_tcb *stcb, int substate)
7473 {
7474 #if defined(KDTRACE_HOOKS)
7475 	int old_state = stcb->asoc.state;
7476 #endif
7477 
7478 	KASSERT((substate & SCTP_STATE_MASK) == 0,
7479 	    ("sctp_add_substate: Can't set state (substate = %x)",
7480 	    substate));
7481 	stcb->asoc.state |= substate;
7482 #if defined(KDTRACE_HOOKS)
7483 	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
7484 	    ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
7485 	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
7486 	    ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
7487 		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
7488 	}
7489 #endif
7490 }
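/*
 * Illustration only (it is an assumption that the state__change probe is
 * exported to DTrace as sctp:::state-change; the probe definition is not
 * in this file): the previous state is passed as the last probe argument,
 * so a one-liner such as
 *
 *	dtrace -n 'sctp:::state-change { trace(arg5); }'
 *
 * can be used to watch association state transitions.
 */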
7491