xref: /freebsd/sys/netinet/sctputil.c (revision c1b2af731bbdd6f37d0f75386acab31b5ad86090)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #include <netinet6/sctp6_var.h>
45 #endif
46 #include <netinet/sctp_header.h>
47 #include <netinet/sctp_output.h>
48 #include <netinet/sctp_uio.h>
49 #include <netinet/sctp_timer.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_auth.h>
52 #include <netinet/sctp_asconf.h>
53 #include <netinet/sctp_bsd_addr.h>
54 #include <netinet/sctp_kdtrace.h>
55 #if defined(INET6) || defined(INET)
56 #include <netinet/tcp_var.h>
57 #endif
58 #include <netinet/udp.h>
59 #include <netinet/udp_var.h>
60 #include <sys/proc.h>
61 #ifdef INET6
62 #include <netinet/icmp6.h>
63 #endif
64 
65 
66 #ifndef KTR_SCTP
67 #define KTR_SCTP KTR_SUBSYS
68 #endif
69 
70 extern const struct sctp_cc_functions sctp_cc_functions[];
71 extern const struct sctp_ss_functions sctp_ss_functions[];
72 
73 void
74 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
75 {
76 #if defined(SCTP_LOCAL_TRACE_BUF)
77 	struct sctp_cwnd_log sctp_clog;
78 
79 	sctp_clog.x.sb.stcb = stcb;
80 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
81 	if (stcb)
82 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
83 	else
84 		sctp_clog.x.sb.stcb_sbcc = 0;
85 	sctp_clog.x.sb.incr = incr;
86 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
87 	    SCTP_LOG_EVENT_SB,
88 	    from,
89 	    sctp_clog.x.misc.log1,
90 	    sctp_clog.x.misc.log2,
91 	    sctp_clog.x.misc.log3,
92 	    sctp_clog.x.misc.log4);
93 #endif
94 }
95 
96 void
97 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
98 {
99 #if defined(SCTP_LOCAL_TRACE_BUF)
100 	struct sctp_cwnd_log sctp_clog;
101 
102 	sctp_clog.x.close.inp = (void *)inp;
103 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
104 	if (stcb) {
105 		sctp_clog.x.close.stcb = (void *)stcb;
106 		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
107 	} else {
108 		sctp_clog.x.close.stcb = 0;
109 		sctp_clog.x.close.state = 0;
110 	}
111 	sctp_clog.x.close.loc = loc;
112 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
113 	    SCTP_LOG_EVENT_CLOSE,
114 	    0,
115 	    sctp_clog.x.misc.log1,
116 	    sctp_clog.x.misc.log2,
117 	    sctp_clog.x.misc.log3,
118 	    sctp_clog.x.misc.log4);
119 #endif
120 }
121 
122 void
123 rto_logging(struct sctp_nets *net, int from)
124 {
125 #if defined(SCTP_LOCAL_TRACE_BUF)
126 	struct sctp_cwnd_log sctp_clog;
127 
128 	memset(&sctp_clog, 0, sizeof(sctp_clog));
129 	sctp_clog.x.rto.net = (void *)net;
130 	sctp_clog.x.rto.rtt = net->rtt / 1000;
131 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
132 	    SCTP_LOG_EVENT_RTT,
133 	    from,
134 	    sctp_clog.x.misc.log1,
135 	    sctp_clog.x.misc.log2,
136 	    sctp_clog.x.misc.log3,
137 	    sctp_clog.x.misc.log4);
138 #endif
139 }
140 
141 void
142 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
143 {
144 #if defined(SCTP_LOCAL_TRACE_BUF)
145 	struct sctp_cwnd_log sctp_clog;
146 
147 	sctp_clog.x.strlog.stcb = stcb;
148 	sctp_clog.x.strlog.n_tsn = tsn;
149 	sctp_clog.x.strlog.n_sseq = sseq;
150 	sctp_clog.x.strlog.e_tsn = 0;
151 	sctp_clog.x.strlog.e_sseq = 0;
152 	sctp_clog.x.strlog.strm = stream;
153 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
154 	    SCTP_LOG_EVENT_STRM,
155 	    from,
156 	    sctp_clog.x.misc.log1,
157 	    sctp_clog.x.misc.log2,
158 	    sctp_clog.x.misc.log3,
159 	    sctp_clog.x.misc.log4);
160 #endif
161 }
162 
163 void
164 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
165 {
166 #if defined(SCTP_LOCAL_TRACE_BUF)
167 	struct sctp_cwnd_log sctp_clog;
168 
169 	sctp_clog.x.nagle.stcb = (void *)stcb;
170 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
171 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
172 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
173 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
174 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
175 	    SCTP_LOG_EVENT_NAGLE,
176 	    action,
177 	    sctp_clog.x.misc.log1,
178 	    sctp_clog.x.misc.log2,
179 	    sctp_clog.x.misc.log3,
180 	    sctp_clog.x.misc.log4);
181 #endif
182 }
183 
184 void
185 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
186 {
187 #if defined(SCTP_LOCAL_TRACE_BUF)
188 	struct sctp_cwnd_log sctp_clog;
189 
190 	sctp_clog.x.sack.cumack = cumack;
191 	sctp_clog.x.sack.oldcumack = old_cumack;
192 	sctp_clog.x.sack.tsn = tsn;
193 	sctp_clog.x.sack.numGaps = gaps;
194 	sctp_clog.x.sack.numDups = dups;
195 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
196 	    SCTP_LOG_EVENT_SACK,
197 	    from,
198 	    sctp_clog.x.misc.log1,
199 	    sctp_clog.x.misc.log2,
200 	    sctp_clog.x.misc.log3,
201 	    sctp_clog.x.misc.log4);
202 #endif
203 }
204 
205 void
206 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
207 {
208 #if defined(SCTP_LOCAL_TRACE_BUF)
209 	struct sctp_cwnd_log sctp_clog;
210 
211 	memset(&sctp_clog, 0, sizeof(sctp_clog));
212 	sctp_clog.x.map.base = map;
213 	sctp_clog.x.map.cum = cum;
214 	sctp_clog.x.map.high = high;
215 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
216 	    SCTP_LOG_EVENT_MAP,
217 	    from,
218 	    sctp_clog.x.misc.log1,
219 	    sctp_clog.x.misc.log2,
220 	    sctp_clog.x.misc.log3,
221 	    sctp_clog.x.misc.log4);
222 #endif
223 }
224 
225 void
226 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
227 {
228 #if defined(SCTP_LOCAL_TRACE_BUF)
229 	struct sctp_cwnd_log sctp_clog;
230 
231 	memset(&sctp_clog, 0, sizeof(sctp_clog));
232 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
233 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
234 	sctp_clog.x.fr.tsn = tsn;
235 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
236 	    SCTP_LOG_EVENT_FR,
237 	    from,
238 	    sctp_clog.x.misc.log1,
239 	    sctp_clog.x.misc.log2,
240 	    sctp_clog.x.misc.log3,
241 	    sctp_clog.x.misc.log4);
242 #endif
243 }
244 
245 #ifdef SCTP_MBUF_LOGGING
246 void
247 sctp_log_mb(struct mbuf *m, int from)
248 {
249 #if defined(SCTP_LOCAL_TRACE_BUF)
250 	struct sctp_cwnd_log sctp_clog;
251 
252 	sctp_clog.x.mb.mp = m;
253 	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
254 	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
255 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
256 	if (SCTP_BUF_IS_EXTENDED(m)) {
257 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
258 		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
259 	} else {
260 		sctp_clog.x.mb.ext = 0;
261 		sctp_clog.x.mb.refcnt = 0;
262 	}
263 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
264 	    SCTP_LOG_EVENT_MBUF,
265 	    from,
266 	    sctp_clog.x.misc.log1,
267 	    sctp_clog.x.misc.log2,
268 	    sctp_clog.x.misc.log3,
269 	    sctp_clog.x.misc.log4);
270 #endif
271 }
272 
273 void
274 sctp_log_mbc(struct mbuf *m, int from)
275 {
276 	struct mbuf *mat;
277 
278 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
279 		sctp_log_mb(mat, from);
280 	}
281 }
282 #endif
283 
284 void
285 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
286 {
287 #if defined(SCTP_LOCAL_TRACE_BUF)
288 	struct sctp_cwnd_log sctp_clog;
289 
290 	if (control == NULL) {
291 		SCTP_PRINTF("Gak log of NULL?\n");
292 		return;
293 	}
294 	sctp_clog.x.strlog.stcb = control->stcb;
295 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
296 	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
297 	sctp_clog.x.strlog.strm = control->sinfo_stream;
298 	if (poschk != NULL) {
299 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
300 		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
301 	} else {
302 		sctp_clog.x.strlog.e_tsn = 0;
303 		sctp_clog.x.strlog.e_sseq = 0;
304 	}
305 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
306 	    SCTP_LOG_EVENT_STRM,
307 	    from,
308 	    sctp_clog.x.misc.log1,
309 	    sctp_clog.x.misc.log2,
310 	    sctp_clog.x.misc.log3,
311 	    sctp_clog.x.misc.log4);
312 #endif
313 }
314 
315 void
316 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
317 {
318 #if defined(SCTP_LOCAL_TRACE_BUF)
319 	struct sctp_cwnd_log sctp_clog;
320 
321 	sctp_clog.x.cwnd.net = net;
322 	if (stcb->asoc.send_queue_cnt > 255)
323 		sctp_clog.x.cwnd.cnt_in_send = 255;
324 	else
325 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
326 	if (stcb->asoc.stream_queue_cnt > 255)
327 		sctp_clog.x.cwnd.cnt_in_str = 255;
328 	else
329 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
330 
331 	if (net) {
332 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
333 		sctp_clog.x.cwnd.inflight = net->flight_size;
334 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
335 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
336 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
337 	}
338 	if (SCTP_CWNDLOG_PRESEND == from) {
339 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
340 	}
341 	sctp_clog.x.cwnd.cwnd_augment = augment;
342 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
343 	    SCTP_LOG_EVENT_CWND,
344 	    from,
345 	    sctp_clog.x.misc.log1,
346 	    sctp_clog.x.misc.log2,
347 	    sctp_clog.x.misc.log3,
348 	    sctp_clog.x.misc.log4);
349 #endif
350 }
351 
352 void
353 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
354 {
355 #if defined(SCTP_LOCAL_TRACE_BUF)
356 	struct sctp_cwnd_log sctp_clog;
357 
358 	memset(&sctp_clog, 0, sizeof(sctp_clog));
359 	if (inp) {
360 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
361 
362 	} else {
363 		sctp_clog.x.lock.sock = (void *)NULL;
364 	}
365 	sctp_clog.x.lock.inp = (void *)inp;
366 	if (stcb) {
367 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
368 	} else {
369 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
370 	}
371 	if (inp) {
372 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
373 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
374 	} else {
375 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
376 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
377 	}
378 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
379 	if (inp && (inp->sctp_socket)) {
380 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
381 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
382 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
383 	} else {
384 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
385 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
386 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
387 	}
388 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
389 	    SCTP_LOG_LOCK_EVENT,
390 	    from,
391 	    sctp_clog.x.misc.log1,
392 	    sctp_clog.x.misc.log2,
393 	    sctp_clog.x.misc.log3,
394 	    sctp_clog.x.misc.log4);
395 #endif
396 }
397 
398 void
399 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
400 {
401 #if defined(SCTP_LOCAL_TRACE_BUF)
402 	struct sctp_cwnd_log sctp_clog;
403 
404 	memset(&sctp_clog, 0, sizeof(sctp_clog));
405 	sctp_clog.x.cwnd.net = net;
406 	sctp_clog.x.cwnd.cwnd_new_value = error;
407 	sctp_clog.x.cwnd.inflight = net->flight_size;
408 	sctp_clog.x.cwnd.cwnd_augment = burst;
409 	if (stcb->asoc.send_queue_cnt > 255)
410 		sctp_clog.x.cwnd.cnt_in_send = 255;
411 	else
412 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
413 	if (stcb->asoc.stream_queue_cnt > 255)
414 		sctp_clog.x.cwnd.cnt_in_str = 255;
415 	else
416 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
417 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
418 	    SCTP_LOG_EVENT_MAXBURST,
419 	    from,
420 	    sctp_clog.x.misc.log1,
421 	    sctp_clog.x.misc.log2,
422 	    sctp_clog.x.misc.log3,
423 	    sctp_clog.x.misc.log4);
424 #endif
425 }
426 
427 void
428 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
429 {
430 #if defined(SCTP_LOCAL_TRACE_BUF)
431 	struct sctp_cwnd_log sctp_clog;
432 
433 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
434 	sctp_clog.x.rwnd.send_size = snd_size;
435 	sctp_clog.x.rwnd.overhead = overhead;
436 	sctp_clog.x.rwnd.new_rwnd = 0;
437 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
438 	    SCTP_LOG_EVENT_RWND,
439 	    from,
440 	    sctp_clog.x.misc.log1,
441 	    sctp_clog.x.misc.log2,
442 	    sctp_clog.x.misc.log3,
443 	    sctp_clog.x.misc.log4);
444 #endif
445 }
446 
447 void
448 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
449 {
450 #if defined(SCTP_LOCAL_TRACE_BUF)
451 	struct sctp_cwnd_log sctp_clog;
452 
453 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
454 	sctp_clog.x.rwnd.send_size = flight_size;
455 	sctp_clog.x.rwnd.overhead = overhead;
456 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
457 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
458 	    SCTP_LOG_EVENT_RWND,
459 	    from,
460 	    sctp_clog.x.misc.log1,
461 	    sctp_clog.x.misc.log2,
462 	    sctp_clog.x.misc.log3,
463 	    sctp_clog.x.misc.log4);
464 #endif
465 }
466 
467 #ifdef SCTP_MBCNT_LOGGING
468 static void
469 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
470 {
471 #if defined(SCTP_LOCAL_TRACE_BUF)
472 	struct sctp_cwnd_log sctp_clog;
473 
474 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
475 	sctp_clog.x.mbcnt.size_change = book;
476 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
477 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
478 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
479 	    SCTP_LOG_EVENT_MBCNT,
480 	    from,
481 	    sctp_clog.x.misc.log1,
482 	    sctp_clog.x.misc.log2,
483 	    sctp_clog.x.misc.log3,
484 	    sctp_clog.x.misc.log4);
485 #endif
486 }
487 #endif
488 
489 void
490 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
491 {
492 #if defined(SCTP_LOCAL_TRACE_BUF)
493 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
494 	    SCTP_LOG_MISC_EVENT,
495 	    from,
496 	    a, b, c, d);
497 #endif
498 }
499 
500 void
501 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
502 {
503 #if defined(SCTP_LOCAL_TRACE_BUF)
504 	struct sctp_cwnd_log sctp_clog;
505 
506 	sctp_clog.x.wake.stcb = (void *)stcb;
507 	sctp_clog.x.wake.wake_cnt = wake_cnt;
508 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
509 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
510 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
511 
512 	if (stcb->asoc.stream_queue_cnt < 0xff)
513 		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
514 	else
515 		sctp_clog.x.wake.stream_qcnt = 0xff;
516 
517 	if (stcb->asoc.chunks_on_out_queue < 0xff)
518 		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
519 	else
520 		sctp_clog.x.wake.chunks_on_oque = 0xff;
521 
522 	sctp_clog.x.wake.sctpflags = 0;
523 	/* set in the defered mode stuff */
524 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
525 		sctp_clog.x.wake.sctpflags |= 1;
526 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
527 		sctp_clog.x.wake.sctpflags |= 2;
528 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
529 		sctp_clog.x.wake.sctpflags |= 4;
530 	/* what about the sb */
531 	if (stcb->sctp_socket) {
532 		struct socket *so = stcb->sctp_socket;
533 
534 		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
535 	} else {
536 		sctp_clog.x.wake.sbflags = 0xff;
537 	}
538 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
539 	    SCTP_LOG_EVENT_WAKE,
540 	    from,
541 	    sctp_clog.x.misc.log1,
542 	    sctp_clog.x.misc.log2,
543 	    sctp_clog.x.misc.log3,
544 	    sctp_clog.x.misc.log4);
545 #endif
546 }
547 
548 void
549 sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
550 {
551 #if defined(SCTP_LOCAL_TRACE_BUF)
552 	struct sctp_cwnd_log sctp_clog;
553 
554 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
555 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
556 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
557 	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
558 	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
559 	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
560 	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
561 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
562 	    SCTP_LOG_EVENT_BLOCK,
563 	    from,
564 	    sctp_clog.x.misc.log1,
565 	    sctp_clog.x.misc.log2,
566 	    sctp_clog.x.misc.log3,
567 	    sctp_clog.x.misc.log4);
568 #endif
569 }
570 
571 int
572 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
573 {
574 	/* May need to fix this if ktrdump does not work */
575 	return (0);
576 }
577 
578 #ifdef SCTP_AUDITING_ENABLED
579 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
580 static int sctp_audit_indx = 0;
581 
582 static
583 void
584 sctp_print_audit_report(void)
585 {
586 	int i;
587 	int cnt;
588 
589 	cnt = 0;
590 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
591 		if ((sctp_audit_data[i][0] == 0xe0) &&
592 		    (sctp_audit_data[i][1] == 0x01)) {
593 			cnt = 0;
594 			SCTP_PRINTF("\n");
595 		} else if (sctp_audit_data[i][0] == 0xf0) {
596 			cnt = 0;
597 			SCTP_PRINTF("\n");
598 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
599 		    (sctp_audit_data[i][1] == 0x01)) {
600 			SCTP_PRINTF("\n");
601 			cnt = 0;
602 		}
603 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
604 		    (uint32_t)sctp_audit_data[i][1]);
605 		cnt++;
606 		if ((cnt % 14) == 0)
607 			SCTP_PRINTF("\n");
608 	}
609 	for (i = 0; i < sctp_audit_indx; i++) {
610 		if ((sctp_audit_data[i][0] == 0xe0) &&
611 		    (sctp_audit_data[i][1] == 0x01)) {
612 			cnt = 0;
613 			SCTP_PRINTF("\n");
614 		} else if (sctp_audit_data[i][0] == 0xf0) {
615 			cnt = 0;
616 			SCTP_PRINTF("\n");
617 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
618 		    (sctp_audit_data[i][1] == 0x01)) {
619 			SCTP_PRINTF("\n");
620 			cnt = 0;
621 		}
622 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
623 		    (uint32_t)sctp_audit_data[i][1]);
624 		cnt++;
625 		if ((cnt % 14) == 0)
626 			SCTP_PRINTF("\n");
627 	}
628 	SCTP_PRINTF("\n");
629 }
630 
631 void
632 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
633     struct sctp_nets *net)
634 {
635 	int resend_cnt, tot_out, rep, tot_book_cnt;
636 	struct sctp_nets *lnet;
637 	struct sctp_tmit_chunk *chk;
638 
639 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
640 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
641 	sctp_audit_indx++;
642 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
643 		sctp_audit_indx = 0;
644 	}
645 	if (inp == NULL) {
646 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
647 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
648 		sctp_audit_indx++;
649 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
650 			sctp_audit_indx = 0;
651 		}
652 		return;
653 	}
654 	if (stcb == NULL) {
655 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
656 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
657 		sctp_audit_indx++;
658 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
659 			sctp_audit_indx = 0;
660 		}
661 		return;
662 	}
663 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
664 	sctp_audit_data[sctp_audit_indx][1] =
665 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
666 	sctp_audit_indx++;
667 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
668 		sctp_audit_indx = 0;
669 	}
670 	rep = 0;
671 	tot_book_cnt = 0;
672 	resend_cnt = tot_out = 0;
673 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
674 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
675 			resend_cnt++;
676 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
677 			tot_out += chk->book_size;
678 			tot_book_cnt++;
679 		}
680 	}
681 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
682 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
683 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
684 		sctp_audit_indx++;
685 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
686 			sctp_audit_indx = 0;
687 		}
688 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
689 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
690 		rep = 1;
691 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
692 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
693 		sctp_audit_data[sctp_audit_indx][1] =
694 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
695 		sctp_audit_indx++;
696 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
697 			sctp_audit_indx = 0;
698 		}
699 	}
700 	if (tot_out != stcb->asoc.total_flight) {
701 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
702 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
703 		sctp_audit_indx++;
704 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
705 			sctp_audit_indx = 0;
706 		}
707 		rep = 1;
708 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
709 		    (int)stcb->asoc.total_flight);
710 		stcb->asoc.total_flight = tot_out;
711 	}
712 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
713 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
714 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
715 		sctp_audit_indx++;
716 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
717 			sctp_audit_indx = 0;
718 		}
719 		rep = 1;
720 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
721 
722 		stcb->asoc.total_flight_count = tot_book_cnt;
723 	}
724 	tot_out = 0;
725 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
726 		tot_out += lnet->flight_size;
727 	}
728 	if (tot_out != stcb->asoc.total_flight) {
729 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
730 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
731 		sctp_audit_indx++;
732 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
733 			sctp_audit_indx = 0;
734 		}
735 		rep = 1;
736 		SCTP_PRINTF("real flight:%d net total was %d\n",
737 		    stcb->asoc.total_flight, tot_out);
738 		/* now corrective action */
739 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
740 
741 			tot_out = 0;
742 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
743 				if ((chk->whoTo == lnet) &&
744 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
745 					tot_out += chk->book_size;
746 				}
747 			}
748 			if (lnet->flight_size != tot_out) {
749 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
750 				    (void *)lnet, lnet->flight_size,
751 				    tot_out);
752 				lnet->flight_size = tot_out;
753 			}
754 		}
755 	}
756 	if (rep) {
757 		sctp_print_audit_report();
758 	}
759 }
760 
761 void
762 sctp_audit_log(uint8_t ev, uint8_t fd)
763 {
764 
765 	sctp_audit_data[sctp_audit_indx][0] = ev;
766 	sctp_audit_data[sctp_audit_indx][1] = fd;
767 	sctp_audit_indx++;
768 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
769 		sctp_audit_indx = 0;
770 	}
771 }
772 
773 #endif
774 
775 /*
776  * sctp_stop_timers_for_shutdown() should be called
777  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
778  * state to make sure that all timers are stopped.
779  */
780 void
781 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
782 {
783 	struct sctp_inpcb *inp;
784 	struct sctp_nets *net;
785 
786 	inp = stcb->sctp_ep;
787 
788 	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
789 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
790 	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
791 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
792 	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
793 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
794 	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
795 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
796 	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
797 		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
798 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
799 		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
800 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
801 	}
802 }
803 
804 void
805 sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
806 {
807 	struct sctp_inpcb *inp;
808 	struct sctp_nets *net;
809 
810 	inp = stcb->sctp_ep;
811 	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
812 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
813 	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
814 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
815 	if (stop_assoc_kill_timer) {
816 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
817 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
818 	}
819 	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
820 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
821 	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
822 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
823 	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
824 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
825 	/* Mobility adaptation */
826 	sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
827 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
828 	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
829 		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
830 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
831 		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
832 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
833 		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
834 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
835 		sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
836 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
837 		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
838 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
839 		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
840 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
841 		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
842 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
843 	}
844 }
845 
846 /*
847  * A list of sizes based on typical mtu's, used only if next hop size not
848  * returned. These values MUST be multiples of 4 and MUST be ordered.
849  */
850 static uint32_t sctp_mtu_sizes[] = {
851 	68,
852 	296,
853 	508,
854 	512,
855 	544,
856 	576,
857 	1004,
858 	1492,
859 	1500,
860 	1536,
861 	2000,
862 	2048,
863 	4352,
864 	4464,
865 	8168,
866 	17912,
867 	32000,
868 	65532
869 };
870 
871 /*
872  * Return the largest MTU in sctp_mtu_sizes smaller than val.
873  * If val is smaller than the minimum, just return the largest
874  * multiple of 4 smaller or equal to val.
875  * Ensure that the result is a multiple of 4.
876  */
877 uint32_t
878 sctp_get_prev_mtu(uint32_t val)
879 {
880 	uint32_t i;
881 
882 	val &= 0xfffffffc;
883 	if (val <= sctp_mtu_sizes[0]) {
884 		return (val);
885 	}
886 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
887 		if (val <= sctp_mtu_sizes[i]) {
888 			break;
889 		}
890 	}
891 	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
892 	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
893 	return (sctp_mtu_sizes[i - 1]);
894 }
895 
896 /*
897  * Return the smallest MTU in sctp_mtu_sizes larger than val.
898  * If val is larger than the maximum, just return the largest multiple of 4 smaller
899  * or equal to val.
900  * Ensure that the result is a multiple of 4.
901  */
902 uint32_t
903 sctp_get_next_mtu(uint32_t val)
904 {
905 	/* select another MTU that is just bigger than this one */
906 	uint32_t i;
907 
908 	val &= 0xfffffffc;
909 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
910 		if (val < sctp_mtu_sizes[i]) {
911 			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
912 			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
913 			return (sctp_mtu_sizes[i]);
914 		}
915 	}
916 	return (val);
917 }
918 
919 void
920 sctp_fill_random_store(struct sctp_pcb *m)
921 {
922 	/*
923 	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
924 	 * our counter. The result becomes our good random numbers and we
925 	 * then setup to give these out. Note that we do no locking to
926 	 * protect this. This is ok, since if competing folks call this we
927 	 * will get more gobbled gook in the random store which is what we
928 	 * want. There is a danger that two guys will use the same random
929 	 * numbers, but thats ok too since that is random as well :->
930 	 */
931 	m->store_at = 0;
932 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
933 	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
934 	    sizeof(m->random_counter), (uint8_t *)m->random_store);
935 	m->random_counter++;
936 }
937 
938 uint32_t
939 sctp_select_initial_TSN(struct sctp_pcb *inp)
940 {
941 	/*
942 	 * A true implementation should use random selection process to get
943 	 * the initial stream sequence number, using RFC1750 as a good
944 	 * guideline
945 	 */
946 	uint32_t x, *xp;
947 	uint8_t *p;
948 	int store_at, new_store;
949 
950 	if (inp->initial_sequence_debug != 0) {
951 		uint32_t ret;
952 
953 		ret = inp->initial_sequence_debug;
954 		inp->initial_sequence_debug++;
955 		return (ret);
956 	}
957 retry:
958 	store_at = inp->store_at;
959 	new_store = store_at + sizeof(uint32_t);
960 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
961 		new_store = 0;
962 	}
963 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
964 		goto retry;
965 	}
966 	if (new_store == 0) {
967 		/* Refill the random store */
968 		sctp_fill_random_store(inp);
969 	}
970 	p = &inp->random_store[store_at];
971 	xp = (uint32_t *)p;
972 	x = *xp;
973 	return (x);
974 }
975 
976 uint32_t
977 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
978 {
979 	uint32_t x;
980 	struct timeval now;
981 
982 	if (check) {
983 		(void)SCTP_GETTIME_TIMEVAL(&now);
984 	}
985 	for (;;) {
986 		x = sctp_select_initial_TSN(&inp->sctp_ep);
987 		if (x == 0) {
988 			/* we never use 0 */
989 			continue;
990 		}
991 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
992 			break;
993 		}
994 	}
995 	return (x);
996 }
997 
998 int32_t
999 sctp_map_assoc_state(int kernel_state)
1000 {
1001 	int32_t user_state;
1002 
1003 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
1004 		user_state = SCTP_CLOSED;
1005 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
1006 		user_state = SCTP_SHUTDOWN_PENDING;
1007 	} else {
1008 		switch (kernel_state & SCTP_STATE_MASK) {
1009 		case SCTP_STATE_EMPTY:
1010 			user_state = SCTP_CLOSED;
1011 			break;
1012 		case SCTP_STATE_INUSE:
1013 			user_state = SCTP_CLOSED;
1014 			break;
1015 		case SCTP_STATE_COOKIE_WAIT:
1016 			user_state = SCTP_COOKIE_WAIT;
1017 			break;
1018 		case SCTP_STATE_COOKIE_ECHOED:
1019 			user_state = SCTP_COOKIE_ECHOED;
1020 			break;
1021 		case SCTP_STATE_OPEN:
1022 			user_state = SCTP_ESTABLISHED;
1023 			break;
1024 		case SCTP_STATE_SHUTDOWN_SENT:
1025 			user_state = SCTP_SHUTDOWN_SENT;
1026 			break;
1027 		case SCTP_STATE_SHUTDOWN_RECEIVED:
1028 			user_state = SCTP_SHUTDOWN_RECEIVED;
1029 			break;
1030 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
1031 			user_state = SCTP_SHUTDOWN_ACK_SENT;
1032 			break;
1033 		default:
1034 			user_state = SCTP_CLOSED;
1035 			break;
1036 		}
1037 	}
1038 	return (user_state);
1039 }
1040 
1041 int
1042 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1043     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
1044 {
1045 	struct sctp_association *asoc;
1046 
1047 	/*
1048 	 * Anything set to zero is taken care of by the allocation routine's
1049 	 * bzero
1050 	 */
1051 
1052 	/*
1053 	 * Up front select what scoping to apply on addresses I tell my peer
1054 	 * Not sure what to do with these right now, we will need to come up
1055 	 * with a way to set them. We may need to pass them through from the
1056 	 * caller in the sctp_aloc_assoc() function.
1057 	 */
1058 	int i;
1059 #if defined(SCTP_DETAILED_STR_STATS)
1060 	int j;
1061 #endif
1062 
1063 	asoc = &stcb->asoc;
1064 	/* init all variables to a known value. */
1065 	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
1066 	asoc->max_burst = inp->sctp_ep.max_burst;
1067 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
1068 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
1069 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
1070 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
1071 	asoc->ecn_supported = inp->ecn_supported;
1072 	asoc->prsctp_supported = inp->prsctp_supported;
1073 	asoc->idata_supported = inp->idata_supported;
1074 	asoc->auth_supported = inp->auth_supported;
1075 	asoc->asconf_supported = inp->asconf_supported;
1076 	asoc->reconfig_supported = inp->reconfig_supported;
1077 	asoc->nrsack_supported = inp->nrsack_supported;
1078 	asoc->pktdrop_supported = inp->pktdrop_supported;
1079 	asoc->idata_supported = inp->idata_supported;
1080 	asoc->sctp_cmt_pf = (uint8_t)0;
1081 	asoc->sctp_frag_point = inp->sctp_frag_point;
1082 	asoc->sctp_features = inp->sctp_features;
1083 	asoc->default_dscp = inp->sctp_ep.default_dscp;
1084 	asoc->max_cwnd = inp->max_cwnd;
1085 #ifdef INET6
1086 	if (inp->sctp_ep.default_flowlabel) {
1087 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
1088 	} else {
1089 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
1090 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
1091 			asoc->default_flowlabel &= 0x000fffff;
1092 			asoc->default_flowlabel |= 0x80000000;
1093 		} else {
1094 			asoc->default_flowlabel = 0;
1095 		}
1096 	}
1097 #endif
1098 	asoc->sb_send_resv = 0;
1099 	if (override_tag) {
1100 		asoc->my_vtag = override_tag;
1101 	} else {
1102 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1103 	}
1104 	/* Get the nonce tags */
1105 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1106 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1107 	asoc->vrf_id = vrf_id;
1108 
1109 #ifdef SCTP_ASOCLOG_OF_TSNS
1110 	asoc->tsn_in_at = 0;
1111 	asoc->tsn_out_at = 0;
1112 	asoc->tsn_in_wrapped = 0;
1113 	asoc->tsn_out_wrapped = 0;
1114 	asoc->cumack_log_at = 0;
1115 	asoc->cumack_log_atsnt = 0;
1116 #endif
1117 #ifdef SCTP_FS_SPEC_LOG
1118 	asoc->fs_index = 0;
1119 #endif
1120 	asoc->refcnt = 0;
1121 	asoc->assoc_up_sent = 0;
1122 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1123 	    sctp_select_initial_TSN(&inp->sctp_ep);
1124 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1125 	/* we are optimisitic here */
1126 	asoc->peer_supports_nat = 0;
1127 	asoc->sent_queue_retran_cnt = 0;
1128 
1129 	/* for CMT */
1130 	asoc->last_net_cmt_send_started = NULL;
1131 
1132 	/* This will need to be adjusted */
1133 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1134 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1135 	asoc->asconf_seq_in = asoc->last_acked_seq;
1136 
1137 	/* here we are different, we hold the next one we expect */
1138 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1139 
1140 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1141 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1142 
1143 	asoc->default_mtu = inp->sctp_ep.default_mtu;
1144 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1145 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1146 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1147 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1148 	asoc->free_chunk_cnt = 0;
1149 
1150 	asoc->iam_blocking = 0;
1151 	asoc->context = inp->sctp_context;
1152 	asoc->local_strreset_support = inp->local_strreset_support;
1153 	asoc->def_send = inp->def_send;
1154 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1155 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1156 	asoc->pr_sctp_cnt = 0;
1157 	asoc->total_output_queue_size = 0;
1158 
1159 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1160 		asoc->scope.ipv6_addr_legal = 1;
1161 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1162 			asoc->scope.ipv4_addr_legal = 1;
1163 		} else {
1164 			asoc->scope.ipv4_addr_legal = 0;
1165 		}
1166 	} else {
1167 		asoc->scope.ipv6_addr_legal = 0;
1168 		asoc->scope.ipv4_addr_legal = 1;
1169 	}
1170 
1171 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1172 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1173 
1174 	asoc->smallest_mtu = inp->sctp_frag_point;
1175 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1176 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1177 
1178 	asoc->stream_locked_on = 0;
1179 	asoc->ecn_echo_cnt_onq = 0;
1180 	asoc->stream_locked = 0;
1181 
1182 	asoc->send_sack = 1;
1183 
1184 	LIST_INIT(&asoc->sctp_restricted_addrs);
1185 
1186 	TAILQ_INIT(&asoc->nets);
1187 	TAILQ_INIT(&asoc->pending_reply_queue);
1188 	TAILQ_INIT(&asoc->asconf_ack_sent);
1189 	/* Setup to fill the hb random cache at first HB */
1190 	asoc->hb_random_idx = 4;
1191 
1192 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1193 
1194 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1195 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1196 
1197 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1198 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1199 
1200 	/*
1201 	 * Now the stream parameters, here we allocate space for all streams
1202 	 * that we request by default.
1203 	 */
1204 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1205 	    o_strms;
1206 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1207 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1208 	    SCTP_M_STRMO);
1209 	if (asoc->strmout == NULL) {
1210 		/* big trouble no memory */
1211 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1212 		return (ENOMEM);
1213 	}
1214 	for (i = 0; i < asoc->streamoutcnt; i++) {
1215 		/*
1216 		 * inbound side must be set to 0xffff, also NOTE when we get
1217 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1218 		 * count (streamoutcnt) but first check if we sent to any of
1219 		 * the upper streams that were dropped (if some were). Those
1220 		 * that were dropped must be notified to the upper layer as
1221 		 * failed to send.
1222 		 */
1223 		asoc->strmout[i].next_mid_ordered = 0;
1224 		asoc->strmout[i].next_mid_unordered = 0;
1225 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1226 		asoc->strmout[i].chunks_on_queues = 0;
1227 #if defined(SCTP_DETAILED_STR_STATS)
1228 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1229 			asoc->strmout[i].abandoned_sent[j] = 0;
1230 			asoc->strmout[i].abandoned_unsent[j] = 0;
1231 		}
1232 #else
1233 		asoc->strmout[i].abandoned_sent[0] = 0;
1234 		asoc->strmout[i].abandoned_unsent[0] = 0;
1235 #endif
1236 		asoc->strmout[i].sid = i;
1237 		asoc->strmout[i].last_msg_incomplete = 0;
1238 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1239 		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1240 	}
1241 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1242 
1243 	/* Now the mapping array */
1244 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1245 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1246 	    SCTP_M_MAP);
1247 	if (asoc->mapping_array == NULL) {
1248 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1249 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1250 		return (ENOMEM);
1251 	}
1252 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1253 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1254 	    SCTP_M_MAP);
1255 	if (asoc->nr_mapping_array == NULL) {
1256 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1257 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1258 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1259 		return (ENOMEM);
1260 	}
1261 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1262 
1263 	/* Now the init of the other outqueues */
1264 	TAILQ_INIT(&asoc->free_chunks);
1265 	TAILQ_INIT(&asoc->control_send_queue);
1266 	TAILQ_INIT(&asoc->asconf_send_queue);
1267 	TAILQ_INIT(&asoc->send_queue);
1268 	TAILQ_INIT(&asoc->sent_queue);
1269 	TAILQ_INIT(&asoc->resetHead);
1270 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1271 	TAILQ_INIT(&asoc->asconf_queue);
1272 	/* authentication fields */
1273 	asoc->authinfo.random = NULL;
1274 	asoc->authinfo.active_keyid = 0;
1275 	asoc->authinfo.assoc_key = NULL;
1276 	asoc->authinfo.assoc_keyid = 0;
1277 	asoc->authinfo.recv_key = NULL;
1278 	asoc->authinfo.recv_keyid = 0;
1279 	LIST_INIT(&asoc->shared_keys);
1280 	asoc->marked_retrans = 0;
1281 	asoc->port = inp->sctp_ep.port;
1282 	asoc->timoinit = 0;
1283 	asoc->timodata = 0;
1284 	asoc->timosack = 0;
1285 	asoc->timoshutdown = 0;
1286 	asoc->timoheartbeat = 0;
1287 	asoc->timocookie = 0;
1288 	asoc->timoshutdownack = 0;
1289 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1290 	asoc->discontinuity_time = asoc->start_time;
1291 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1292 		asoc->abandoned_unsent[i] = 0;
1293 		asoc->abandoned_sent[i] = 0;
1294 	}
1295 	/*
1296 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1297 	 * freed later when the association is freed.
1298 	 */
1299 	return (0);
1300 }
1301 
1302 void
1303 sctp_print_mapping_array(struct sctp_association *asoc)
1304 {
1305 	unsigned int i, limit;
1306 
1307 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1308 	    asoc->mapping_array_size,
1309 	    asoc->mapping_array_base_tsn,
1310 	    asoc->cumulative_tsn,
1311 	    asoc->highest_tsn_inside_map,
1312 	    asoc->highest_tsn_inside_nr_map);
1313 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1314 		if (asoc->mapping_array[limit - 1] != 0) {
1315 			break;
1316 		}
1317 	}
1318 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1319 	for (i = 0; i < limit; i++) {
1320 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1321 	}
1322 	if (limit % 16)
1323 		SCTP_PRINTF("\n");
1324 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1325 		if (asoc->nr_mapping_array[limit - 1]) {
1326 			break;
1327 		}
1328 	}
1329 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1330 	for (i = 0; i < limit; i++) {
1331 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1332 	}
1333 	if (limit % 16)
1334 		SCTP_PRINTF("\n");
1335 }
1336 
1337 int
1338 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1339 {
1340 	/* mapping array needs to grow */
1341 	uint8_t *new_array1, *new_array2;
1342 	uint32_t new_size;
1343 
1344 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1345 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1346 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1347 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1348 		/* can't get more, forget it */
1349 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1350 		if (new_array1) {
1351 			SCTP_FREE(new_array1, SCTP_M_MAP);
1352 		}
1353 		if (new_array2) {
1354 			SCTP_FREE(new_array2, SCTP_M_MAP);
1355 		}
1356 		return (-1);
1357 	}
1358 	memset(new_array1, 0, new_size);
1359 	memset(new_array2, 0, new_size);
1360 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1361 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1362 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1363 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1364 	asoc->mapping_array = new_array1;
1365 	asoc->nr_mapping_array = new_array2;
1366 	asoc->mapping_array_size = new_size;
1367 	return (0);
1368 }
1369 
1370 
1371 static void
1372 sctp_iterator_work(struct sctp_iterator *it)
1373 {
1374 	struct epoch_tracker et;
1375 	struct sctp_inpcb *tinp;
1376 	int iteration_count = 0;
1377 	int inp_skip = 0;
1378 	int first_in = 1;
1379 
1380 	NET_EPOCH_ENTER(et);
1381 	SCTP_INP_INFO_RLOCK();
1382 	SCTP_ITERATOR_LOCK();
1383 	sctp_it_ctl.cur_it = it;
1384 	if (it->inp) {
1385 		SCTP_INP_RLOCK(it->inp);
1386 		SCTP_INP_DECR_REF(it->inp);
1387 	}
1388 	if (it->inp == NULL) {
1389 		/* iterator is complete */
1390 done_with_iterator:
1391 		sctp_it_ctl.cur_it = NULL;
1392 		SCTP_ITERATOR_UNLOCK();
1393 		SCTP_INP_INFO_RUNLOCK();
1394 		if (it->function_atend != NULL) {
1395 			(*it->function_atend) (it->pointer, it->val);
1396 		}
1397 		SCTP_FREE(it, SCTP_M_ITER);
1398 		NET_EPOCH_EXIT(et);
1399 		return;
1400 	}
1401 select_a_new_ep:
1402 	if (first_in) {
1403 		first_in = 0;
1404 	} else {
1405 		SCTP_INP_RLOCK(it->inp);
1406 	}
1407 	while (((it->pcb_flags) &&
1408 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1409 	    ((it->pcb_features) &&
1410 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1411 		/* endpoint flags or features don't match, so keep looking */
1412 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1413 			SCTP_INP_RUNLOCK(it->inp);
1414 			goto done_with_iterator;
1415 		}
1416 		tinp = it->inp;
1417 		it->inp = LIST_NEXT(it->inp, sctp_list);
1418 		SCTP_INP_RUNLOCK(tinp);
1419 		if (it->inp == NULL) {
1420 			goto done_with_iterator;
1421 		}
1422 		SCTP_INP_RLOCK(it->inp);
1423 	}
1424 	/* now go through each assoc which is in the desired state */
1425 	if (it->done_current_ep == 0) {
1426 		if (it->function_inp != NULL)
1427 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1428 		it->done_current_ep = 1;
1429 	}
1430 	if (it->stcb == NULL) {
1431 		/* run the per instance function */
1432 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1433 	}
1434 	if ((inp_skip) || it->stcb == NULL) {
1435 		if (it->function_inp_end != NULL) {
1436 			inp_skip = (*it->function_inp_end) (it->inp,
1437 			    it->pointer,
1438 			    it->val);
1439 		}
1440 		SCTP_INP_RUNLOCK(it->inp);
1441 		goto no_stcb;
1442 	}
1443 	while (it->stcb) {
1444 		SCTP_TCB_LOCK(it->stcb);
1445 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1446 			/* not in the right state... keep looking */
1447 			SCTP_TCB_UNLOCK(it->stcb);
1448 			goto next_assoc;
1449 		}
1450 		/* see if we have limited out the iterator loop */
1451 		iteration_count++;
1452 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1453 			/* Pause to let others grab the lock */
1454 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1455 			SCTP_TCB_UNLOCK(it->stcb);
1456 			SCTP_INP_INCR_REF(it->inp);
1457 			SCTP_INP_RUNLOCK(it->inp);
1458 			SCTP_ITERATOR_UNLOCK();
1459 			SCTP_INP_INFO_RUNLOCK();
1460 			SCTP_INP_INFO_RLOCK();
1461 			SCTP_ITERATOR_LOCK();
1462 			if (sctp_it_ctl.iterator_flags) {
1463 				/* We won't be staying here */
1464 				SCTP_INP_DECR_REF(it->inp);
1465 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1466 				if (sctp_it_ctl.iterator_flags &
1467 				    SCTP_ITERATOR_STOP_CUR_IT) {
1468 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1469 					goto done_with_iterator;
1470 				}
1471 				if (sctp_it_ctl.iterator_flags &
1472 				    SCTP_ITERATOR_STOP_CUR_INP) {
1473 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1474 					goto no_stcb;
1475 				}
1476 				/* If we reach here huh? */
1477 				SCTP_PRINTF("Unknown it ctl flag %x\n",
1478 				    sctp_it_ctl.iterator_flags);
1479 				sctp_it_ctl.iterator_flags = 0;
1480 			}
1481 			SCTP_INP_RLOCK(it->inp);
1482 			SCTP_INP_DECR_REF(it->inp);
1483 			SCTP_TCB_LOCK(it->stcb);
1484 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1485 			iteration_count = 0;
1486 		}
1487 
1488 		/* run function on this one */
1489 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1490 
1491 		/*
1492 		 * we lie here, it really needs to have its own type but
1493 		 * first I must verify that this won't effect things :-0
1494 		 */
1495 		if (it->no_chunk_output == 0)
1496 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1497 
1498 		SCTP_TCB_UNLOCK(it->stcb);
1499 next_assoc:
1500 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1501 		if (it->stcb == NULL) {
1502 			/* Run last function */
1503 			if (it->function_inp_end != NULL) {
1504 				inp_skip = (*it->function_inp_end) (it->inp,
1505 				    it->pointer,
1506 				    it->val);
1507 			}
1508 		}
1509 	}
1510 	SCTP_INP_RUNLOCK(it->inp);
1511 no_stcb:
1512 	/* done with all assocs on this endpoint, move on to next endpoint */
1513 	it->done_current_ep = 0;
1514 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1515 		it->inp = NULL;
1516 	} else {
1517 		it->inp = LIST_NEXT(it->inp, sctp_list);
1518 	}
1519 	if (it->inp == NULL) {
1520 		goto done_with_iterator;
1521 	}
1522 	goto select_a_new_ep;
1523 }
1524 
1525 void
1526 sctp_iterator_worker(void)
1527 {
1528 	struct sctp_iterator *it;
1529 
1530 	/* This function is called with the WQ lock in place */
1531 	sctp_it_ctl.iterator_running = 1;
1532 	while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
1533 		/* now lets work on this one */
1534 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1535 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1536 		CURVNET_SET(it->vn);
1537 		sctp_iterator_work(it);
1538 		CURVNET_RESTORE();
1539 		SCTP_IPI_ITERATOR_WQ_LOCK();
1540 		/* sa_ignore FREED_MEMORY */
1541 	}
1542 	sctp_it_ctl.iterator_running = 0;
1543 	return;
1544 }
1545 
1546 
1547 static void
1548 sctp_handle_addr_wq(void)
1549 {
1550 	/* deal with the ADDR wq from the rtsock calls */
1551 	struct sctp_laddr *wi, *nwi;
1552 	struct sctp_asconf_iterator *asc;
1553 
1554 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1555 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1556 	if (asc == NULL) {
1557 		/* Try later, no memory */
1558 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1559 		    (struct sctp_inpcb *)NULL,
1560 		    (struct sctp_tcb *)NULL,
1561 		    (struct sctp_nets *)NULL);
1562 		return;
1563 	}
1564 	LIST_INIT(&asc->list_of_work);
1565 	asc->cnt = 0;
1566 
1567 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1568 		LIST_REMOVE(wi, sctp_nxt_addr);
1569 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1570 		asc->cnt++;
1571 	}
1572 
1573 	if (asc->cnt == 0) {
1574 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1575 	} else {
1576 		int ret;
1577 
1578 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1579 		    sctp_asconf_iterator_stcb,
1580 		    NULL,	/* No ep end for boundall */
1581 		    SCTP_PCB_FLAGS_BOUNDALL,
1582 		    SCTP_PCB_ANY_FEATURES,
1583 		    SCTP_ASOC_ANY_STATE,
1584 		    (void *)asc, 0,
1585 		    sctp_asconf_iterator_end, NULL, 0);
1586 		if (ret) {
1587 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1588 			/*
1589 			 * Freeing if we are stopping or put back on the
1590 			 * addr_wq.
1591 			 */
1592 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1593 				sctp_asconf_iterator_end(asc, 0);
1594 			} else {
1595 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1596 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1597 				}
1598 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1599 			}
1600 		}
1601 	}
1602 }
1603 
1604 /*-
1605  * The following table shows which pointers for the inp, stcb, or net are
1606  * stored for each timer after it was started.
1607  *
1608  *|Name                         |Timer                        |inp |stcb|net |
1609  *|-----------------------------|-----------------------------|----|----|----|
1610  *|SCTP_TIMER_TYPE_SEND         |net->rxt_timer               |Yes |Yes |Yes |
1611  *|SCTP_TIMER_TYPE_INIT         |net->rxt_timer               |Yes |Yes |Yes |
1612  *|SCTP_TIMER_TYPE_RECV         |stcb->asoc.dack_timer        |Yes |Yes |No  |
1613  *|SCTP_TIMER_TYPE_SHUTDOWN     |net->rxt_timer               |Yes |Yes |Yes |
1614  *|SCTP_TIMER_TYPE_HEARTBEAT    |net->hb_timer                |Yes |Yes |Yes |
1615  *|SCTP_TIMER_TYPE_COOKIE       |net->rxt_timer               |Yes |Yes |Yes |
1616  *|SCTP_TIMER_TYPE_NEWCOOKIE    |inp->sctp_ep.signature_change|Yes |No  |No  |
1617  *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer              |Yes |Yes |Yes |
1618  *|SCTP_TIMER_TYPE_SHUTDOWNACK  |net->rxt_timer               |Yes |Yes |Yes |
1619  *|SCTP_TIMER_TYPE_ASCONF       |stcb->asoc.asconf_timer      |Yes |Yes |Yes |
1620  *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer  |Yes |Yes |No  |
1621  *|SCTP_TIMER_TYPE_AUTOCLOSE    |stcb->asoc.autoclose_timer   |Yes |Yes |No  |
1622  *|SCTP_TIMER_TYPE_STRRESET     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
1623  *|SCTP_TIMER_TYPE_INPKILL      |inp->sctp_ep.signature_change|Yes |No  |No  |
1624  *|SCTP_TIMER_TYPE_ASOCKILL     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
1625  *|SCTP_TIMER_TYPE_ADDR_WQ      |SCTP_BASE_INFO(addr_wq_timer)|No  |No  |No  |
1626  *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No  |
1627  */
1628 
1629 void
1630 sctp_timeout_handler(void *t)
1631 {
1632 	struct epoch_tracker et;
1633 	struct timeval tv;
1634 	struct sctp_inpcb *inp;
1635 	struct sctp_tcb *stcb;
1636 	struct sctp_nets *net;
1637 	struct sctp_timer *tmr;
1638 	struct mbuf *op_err;
1639 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1640 	struct socket *so;
1641 #endif
1642 	int did_output;
1643 	int type;
1644 	int i, secret;
1645 
1646 	tmr = (struct sctp_timer *)t;
1647 	inp = (struct sctp_inpcb *)tmr->ep;
1648 	stcb = (struct sctp_tcb *)tmr->tcb;
1649 	net = (struct sctp_nets *)tmr->net;
1650 	CURVNET_SET((struct vnet *)tmr->vnet);
1651 	did_output = 1;
1652 
1653 #ifdef SCTP_AUDITING_ENABLED
1654 	sctp_audit_log(0xF0, (uint8_t)tmr->type);
1655 	sctp_auditing(3, inp, stcb, net);
1656 #endif
1657 
1658 	/* sanity checks... */
1659 	KASSERT(tmr->self == tmr, ("tmr->self corrupted"));
1660 	KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type), ("Invalid timer type %d", tmr->type));
1661 	type = tmr->type;
1662 	if (inp) {
1663 		SCTP_INP_INCR_REF(inp);
1664 	}
1665 	tmr->stopped_from = 0xa001;
1666 	if (stcb) {
1667 		atomic_add_int(&stcb->asoc.refcnt, 1);
1668 		if (stcb->asoc.state == 0) {
1669 			atomic_add_int(&stcb->asoc.refcnt, -1);
1670 			if (inp) {
1671 				SCTP_INP_DECR_REF(inp);
1672 			}
1673 			SCTPDBG(SCTP_DEBUG_TIMER2,
1674 			    "Timer type %d handler exiting due to CLOSED association.\n",
1675 			    type);
1676 			CURVNET_RESTORE();
1677 			return;
1678 		}
1679 	}
1680 	tmr->stopped_from = 0xa002;
1681 	SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d goes off.\n", type);
1682 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1683 		if (inp) {
1684 			SCTP_INP_DECR_REF(inp);
1685 		}
1686 		if (stcb) {
1687 			atomic_add_int(&stcb->asoc.refcnt, -1);
1688 		}
1689 		SCTPDBG(SCTP_DEBUG_TIMER2,
1690 		    "Timer type %d handler exiting due to not being active.\n",
1691 		    type);
1692 		CURVNET_RESTORE();
1693 		return;
1694 	}
1695 
1696 	tmr->stopped_from = 0xa003;
1697 	if (stcb) {
1698 		SCTP_TCB_LOCK(stcb);
1699 		atomic_add_int(&stcb->asoc.refcnt, -1);
1700 		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1701 		    ((stcb->asoc.state == 0) ||
1702 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1703 			SCTP_TCB_UNLOCK(stcb);
1704 			if (inp) {
1705 				SCTP_INP_DECR_REF(inp);
1706 			}
1707 			SCTPDBG(SCTP_DEBUG_TIMER2,
1708 			    "Timer type %d handler exiting due to CLOSED association.\n",
1709 			    type);
1710 			CURVNET_RESTORE();
1711 			return;
1712 		}
1713 	} else if (inp != NULL) {
1714 		SCTP_INP_WLOCK(inp);
1715 	} else {
1716 		SCTP_WQ_ADDR_LOCK();
1717 	}
1718 
1719 	/* Record in stopped_from which timeout occurred. */
1720 	tmr->stopped_from = type;
1721 	NET_EPOCH_ENTER(et);
1722 	/* mark as being serviced now */
1723 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1724 		/*
1725 		 * Callout has been rescheduled.
1726 		 */
1727 		goto get_out;
1728 	}
1729 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1730 		/*
1731 		 * Not active, so no action.
1732 		 */
1733 		goto get_out;
1734 	}
1735 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1736 
1737 	/* call the handler for the appropriate timer type */
1738 	switch (type) {
1739 	case SCTP_TIMER_TYPE_SEND:
1740 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1741 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1742 		    type, inp, stcb, net));
1743 		SCTP_STAT_INCR(sctps_timodata);
1744 		stcb->asoc.timodata++;
1745 		stcb->asoc.num_send_timers_up--;
1746 		if (stcb->asoc.num_send_timers_up < 0) {
1747 			stcb->asoc.num_send_timers_up = 0;
1748 		}
1749 		SCTP_TCB_LOCK_ASSERT(stcb);
1750 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1751 			/* no need to unlock on tcb its gone */
1752 
1753 			goto out_decr;
1754 		}
1755 		SCTP_TCB_LOCK_ASSERT(stcb);
1756 #ifdef SCTP_AUDITING_ENABLED
1757 		sctp_auditing(4, inp, stcb, net);
1758 #endif
1759 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1760 		if ((stcb->asoc.num_send_timers_up == 0) &&
1761 		    (stcb->asoc.sent_queue_cnt > 0)) {
1762 			struct sctp_tmit_chunk *chk;
1763 
1764 			/*
1765 			 * safeguard. If there on some on the sent queue
1766 			 * somewhere but no timers running something is
1767 			 * wrong... so we start a timer on the first chunk
1768 			 * on the send queue on whatever net it is sent to.
1769 			 */
1770 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1771 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1772 			    chk->whoTo);
1773 		}
1774 		break;
1775 	case SCTP_TIMER_TYPE_INIT:
1776 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1777 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1778 		    type, inp, stcb, net));
1779 		SCTP_STAT_INCR(sctps_timoinit);
1780 		stcb->asoc.timoinit++;
1781 		if (sctp_t1init_timer(inp, stcb, net)) {
1782 			/* no need to unlock on tcb its gone */
1783 			goto out_decr;
1784 		}
1785 		/* We do output but not here */
1786 		did_output = 0;
1787 		break;
1788 	case SCTP_TIMER_TYPE_RECV:
1789 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
1790 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1791 		    type, inp, stcb, net));
1792 		SCTP_STAT_INCR(sctps_timosack);
1793 		stcb->asoc.timosack++;
1794 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1795 #ifdef SCTP_AUDITING_ENABLED
1796 		sctp_auditing(4, inp, stcb, NULL);
1797 #endif
1798 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1799 		break;
1800 	case SCTP_TIMER_TYPE_SHUTDOWN:
1801 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1802 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1803 		    type, inp, stcb, net));
1804 		SCTP_STAT_INCR(sctps_timoshutdown);
1805 		stcb->asoc.timoshutdown++;
1806 		if (sctp_shutdown_timer(inp, stcb, net)) {
1807 			/* no need to unlock on tcb its gone */
1808 			goto out_decr;
1809 		}
1810 #ifdef SCTP_AUDITING_ENABLED
1811 		sctp_auditing(4, inp, stcb, net);
1812 #endif
1813 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1814 		break;
1815 	case SCTP_TIMER_TYPE_HEARTBEAT:
1816 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1817 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1818 		    type, inp, stcb, net));
1819 		SCTP_STAT_INCR(sctps_timoheartbeat);
1820 		stcb->asoc.timoheartbeat++;
1821 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1822 			/* no need to unlock on tcb its gone */
1823 			goto out_decr;
1824 		}
1825 #ifdef SCTP_AUDITING_ENABLED
1826 		sctp_auditing(4, inp, stcb, net);
1827 #endif
1828 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1829 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1830 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1831 		}
1832 		break;
1833 	case SCTP_TIMER_TYPE_COOKIE:
1834 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1835 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1836 		    type, inp, stcb, net));
1837 		SCTP_STAT_INCR(sctps_timocookie);
1838 		stcb->asoc.timocookie++;
1839 		if (sctp_cookie_timer(inp, stcb, net)) {
1840 			/* no need to unlock on tcb its gone */
1841 			goto out_decr;
1842 		}
1843 #ifdef SCTP_AUDITING_ENABLED
1844 		sctp_auditing(4, inp, stcb, net);
1845 #endif
1846 		/*
1847 		 * We consider T3 and Cookie timer pretty much the same with
1848 		 * respect to where from in chunk_output.
1849 		 */
1850 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1851 		break;
1852 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1853 		KASSERT(inp != NULL && stcb == NULL && net == NULL,
1854 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1855 		    type, inp, stcb, net));
1856 		SCTP_STAT_INCR(sctps_timosecret);
1857 		(void)SCTP_GETTIME_TIMEVAL(&tv);
1858 		inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1859 		inp->sctp_ep.last_secret_number =
1860 		    inp->sctp_ep.current_secret_number;
1861 		inp->sctp_ep.current_secret_number++;
1862 		if (inp->sctp_ep.current_secret_number >=
1863 		    SCTP_HOW_MANY_SECRETS) {
1864 			inp->sctp_ep.current_secret_number = 0;
1865 		}
1866 		secret = (int)inp->sctp_ep.current_secret_number;
1867 		for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1868 			inp->sctp_ep.secret_key[secret][i] =
1869 			    sctp_select_initial_TSN(&inp->sctp_ep);
1870 		}
1871 		sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);
1872 		did_output = 0;
1873 		break;
1874 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1875 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1876 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1877 		    type, inp, stcb, net));
1878 		SCTP_STAT_INCR(sctps_timopathmtu);
1879 		sctp_pathmtu_timer(inp, stcb, net);
1880 		did_output = 0;
1881 		break;
1882 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1883 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1884 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1885 		    type, inp, stcb, net));
1886 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1887 			/* no need to unlock on tcb its gone */
1888 			goto out_decr;
1889 		}
1890 		SCTP_STAT_INCR(sctps_timoshutdownack);
1891 		stcb->asoc.timoshutdownack++;
1892 #ifdef SCTP_AUDITING_ENABLED
1893 		sctp_auditing(4, inp, stcb, net);
1894 #endif
1895 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1896 		break;
1897 	case SCTP_TIMER_TYPE_ASCONF:
1898 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1899 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1900 		    type, inp, stcb, net));
1901 		SCTP_STAT_INCR(sctps_timoasconf);
1902 		if (sctp_asconf_timer(inp, stcb, net)) {
1903 			/* no need to unlock on tcb its gone */
1904 			goto out_decr;
1905 		}
1906 #ifdef SCTP_AUDITING_ENABLED
1907 		sctp_auditing(4, inp, stcb, net);
1908 #endif
1909 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1910 		break;
1911 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1912 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
1913 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1914 		    type, inp, stcb, net));
1915 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1916 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1917 		    "Shutdown guard timer expired");
1918 		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
1919 		/* no need to unlock on tcb its gone */
1920 		goto out_decr;
1921 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1922 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
1923 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1924 		    type, inp, stcb, net));
1925 		SCTP_STAT_INCR(sctps_timoautoclose);
1926 		sctp_autoclose_timer(inp, stcb);
1927 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1928 		did_output = 0;
1929 		break;
1930 	case SCTP_TIMER_TYPE_STRRESET:
1931 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
1932 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1933 		    type, inp, stcb, net));
1934 		SCTP_STAT_INCR(sctps_timostrmrst);
1935 		if (sctp_strreset_timer(inp, stcb)) {
1936 			/* no need to unlock on tcb its gone */
1937 			goto out_decr;
1938 		}
1939 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1940 		break;
1941 	case SCTP_TIMER_TYPE_INPKILL:
1942 		KASSERT(inp != NULL && stcb == NULL && net == NULL,
1943 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1944 		    type, inp, stcb, net));
1945 		SCTP_STAT_INCR(sctps_timoinpkill);
1946 		/*
1947 		 * special case, take away our increment since WE are the
1948 		 * killer
1949 		 */
1950 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
1951 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1952 		SCTP_INP_DECR_REF(inp);
1953 		SCTP_INP_WUNLOCK(inp);
1954 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1955 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1956 		inp = NULL;
1957 		goto out_no_decr;
1958 	case SCTP_TIMER_TYPE_ASOCKILL:
1959 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
1960 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1961 		    type, inp, stcb, net));
1962 		SCTP_STAT_INCR(sctps_timoassockill);
1963 		/* Can we free it yet? */
1964 		SCTP_INP_DECR_REF(inp);
1965 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
1966 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1967 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1968 		so = SCTP_INP_SO(inp);
1969 		atomic_add_int(&stcb->asoc.refcnt, 1);
1970 		SCTP_TCB_UNLOCK(stcb);
1971 		SCTP_SOCKET_LOCK(so, 1);
1972 		SCTP_TCB_LOCK(stcb);
1973 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1974 #endif
1975 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1976 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1977 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1978 		SCTP_SOCKET_UNLOCK(so, 1);
1979 #endif
1980 		/*
1981 		 * free asoc, always unlocks (or destroy's) so prevent
1982 		 * duplicate unlock or unlock of a free mtx :-0
1983 		 */
1984 		stcb = NULL;
1985 		goto out_no_decr;
1986 	case SCTP_TIMER_TYPE_ADDR_WQ:
1987 		KASSERT(inp == NULL && stcb == NULL && net == NULL,
1988 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1989 		    type, inp, stcb, net));
1990 		sctp_handle_addr_wq();
1991 		break;
1992 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1993 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
1994 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1995 		    type, inp, stcb, net));
1996 		SCTP_STAT_INCR(sctps_timodelprim);
1997 		sctp_delete_prim_timer(inp, stcb);
1998 		break;
1999 	default:
2000 #ifdef INVARIANTS
2001 		panic("Unknown timer type %d", type);
2002 #else
2003 		goto get_out;
2004 #endif
2005 	}
2006 #ifdef SCTP_AUDITING_ENABLED
2007 	sctp_audit_log(0xF1, (uint8_t)type);
2008 	if (inp)
2009 		sctp_auditing(5, inp, stcb, net);
2010 #endif
2011 	if ((did_output) && stcb) {
2012 		/*
2013 		 * Now we need to clean up the control chunk chain if an
2014 		 * ECNE is on it. It must be marked as UNSENT again so next
2015 		 * call will continue to send it until such time that we get
2016 		 * a CWR, to remove it. It is, however, less likely that we
2017 		 * will find a ecn echo on the chain though.
2018 		 */
2019 		sctp_fix_ecn_echo(&stcb->asoc);
2020 	}
2021 get_out:
2022 	if (stcb) {
2023 		SCTP_TCB_UNLOCK(stcb);
2024 	} else if (inp != NULL) {
2025 		SCTP_INP_WUNLOCK(inp);
2026 	} else {
2027 		SCTP_WQ_ADDR_UNLOCK();
2028 	}
2029 
2030 out_decr:
2031 	if (inp) {
2032 		SCTP_INP_DECR_REF(inp);
2033 	}
2034 
2035 out_no_decr:
2036 	SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type %d handler finished.\n", type);
2037 	CURVNET_RESTORE();
2038 	NET_EPOCH_EXIT(et);
2039 }
2040 
2041 /*-
2042  * The following table shows which parameters must be provided
2043  * when calling sctp_timer_start(). For parameters not being
2044  * provided, NULL must be used.
2045  *
2046  * |Name                         |inp |stcb|net |
2047  * |-----------------------------|----|----|----|
2048  * |SCTP_TIMER_TYPE_SEND         |Yes |Yes |Yes |
2049  * |SCTP_TIMER_TYPE_INIT         |Yes |Yes |Yes |
2050  * |SCTP_TIMER_TYPE_RECV         |Yes |Yes |No  |
2051  * |SCTP_TIMER_TYPE_SHUTDOWN     |Yes |Yes |Yes |
2052  * |SCTP_TIMER_TYPE_HEARTBEAT    |Yes |Yes |Yes |
2053  * |SCTP_TIMER_TYPE_COOKIE       |Yes |Yes |Yes |
2054  * |SCTP_TIMER_TYPE_NEWCOOKIE    |Yes |No  |No  |
2055  * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes |
2056  * |SCTP_TIMER_TYPE_SHUTDOWNACK  |Yes |Yes |Yes |
2057  * |SCTP_TIMER_TYPE_ASCONF       |Yes |Yes |Yes |
2058  * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No  |
2059  * |SCTP_TIMER_TYPE_AUTOCLOSE    |Yes |Yes |No  |
2060  * |SCTP_TIMER_TYPE_STRRESET     |Yes |Yes |Yes |
2061  * |SCTP_TIMER_TYPE_INPKILL      |Yes |No  |No  |
2062  * |SCTP_TIMER_TYPE_ASOCKILL     |Yes |Yes |No  |
2063  * |SCTP_TIMER_TYPE_ADDR_WQ      |No  |No  |No  |
2064  * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No  |
2065  *
2066  */
2067 
2068 void
2069 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2070     struct sctp_nets *net)
2071 {
2072 	struct sctp_timer *tmr;
2073 	uint32_t to_ticks;
2074 	uint32_t rndval, jitter;
2075 
2076 	tmr = NULL;
2077 	to_ticks = 0;
2078 	if (stcb != NULL) {
2079 		SCTP_TCB_LOCK_ASSERT(stcb);
2080 	} else if (inp != NULL) {
2081 		SCTP_INP_WLOCK_ASSERT(inp);
2082 	} else {
2083 		SCTP_WQ_ADDR_LOCK_ASSERT();
2084 	}
2085 	if (stcb != NULL) {
2086 		/*
2087 		 * Don't restart timer on association that's about to be
2088 		 * killed.
2089 		 */
2090 		if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) &&
2091 		    (t_type != SCTP_TIMER_TYPE_ASOCKILL)) {
2092 			SCTPDBG(SCTP_DEBUG_TIMER2,
2093 			    "Timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n",
2094 			    t_type, inp, stcb, net);
2095 			return;
2096 		}
2097 		/* Don't restart timer on net that's been removed. */
2098 		if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) {
2099 			SCTPDBG(SCTP_DEBUG_TIMER2,
2100 			    "Timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n",
2101 			    t_type, inp, stcb, net);
2102 			return;
2103 		}
2104 	}
2105 	switch (t_type) {
2106 	case SCTP_TIMER_TYPE_SEND:
2107 		/* Here we use the RTO timer. */
2108 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2109 #ifdef INVARIANTS
2110 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2111 			    t_type, inp, stcb, net);
2112 #else
2113 			return;
2114 #endif
2115 		}
2116 		tmr = &net->rxt_timer;
2117 		if (net->RTO == 0) {
2118 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2119 		} else {
2120 			to_ticks = MSEC_TO_TICKS(net->RTO);
2121 		}
2122 		break;
2123 	case SCTP_TIMER_TYPE_INIT:
2124 		/*
2125 		 * Here we use the INIT timer default usually about 1
2126 		 * second.
2127 		 */
2128 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2129 #ifdef INVARIANTS
2130 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2131 			    t_type, inp, stcb, net);
2132 #else
2133 			return;
2134 #endif
2135 		}
2136 		tmr = &net->rxt_timer;
2137 		if (net->RTO == 0) {
2138 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2139 		} else {
2140 			to_ticks = MSEC_TO_TICKS(net->RTO);
2141 		}
2142 		break;
2143 	case SCTP_TIMER_TYPE_RECV:
2144 		/*
2145 		 * Here we use the Delayed-Ack timer value from the inp,
2146 		 * ususually about 200ms.
2147 		 */
2148 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2149 #ifdef INVARIANTS
2150 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2151 			    t_type, inp, stcb, net);
2152 #else
2153 			return;
2154 #endif
2155 		}
2156 		tmr = &stcb->asoc.dack_timer;
2157 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2158 		break;
2159 	case SCTP_TIMER_TYPE_SHUTDOWN:
2160 		/* Here we use the RTO of the destination. */
2161 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2162 #ifdef INVARIANTS
2163 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2164 			    t_type, inp, stcb, net);
2165 #else
2166 			return;
2167 #endif
2168 		}
2169 		tmr = &net->rxt_timer;
2170 		if (net->RTO == 0) {
2171 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2172 		} else {
2173 			to_ticks = MSEC_TO_TICKS(net->RTO);
2174 		}
2175 		break;
2176 	case SCTP_TIMER_TYPE_HEARTBEAT:
2177 		/*
2178 		 * The net is used here so that we can add in the RTO. Even
2179 		 * though we use a different timer. We also add the HB timer
2180 		 * PLUS a random jitter.
2181 		 */
2182 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2183 #ifdef INVARIANTS
2184 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2185 			    t_type, inp, stcb, net);
2186 #else
2187 			return;
2188 #endif
2189 		}
2190 		if ((net->dest_state & SCTP_ADDR_NOHB) &&
2191 		    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2192 			SCTPDBG(SCTP_DEBUG_TIMER2,
2193 			    "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n",
2194 			    t_type, inp, stcb, net);
2195 			return;
2196 		}
2197 		tmr = &net->hb_timer;
2198 		if (net->RTO == 0) {
2199 			to_ticks = stcb->asoc.initial_rto;
2200 		} else {
2201 			to_ticks = net->RTO;
2202 		}
2203 		rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2204 		jitter = rndval % to_ticks;
2205 		if (jitter >= (to_ticks >> 1)) {
2206 			to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2207 		} else {
2208 			to_ticks = to_ticks - jitter;
2209 		}
2210 		if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2211 		    !(net->dest_state & SCTP_ADDR_PF)) {
2212 			to_ticks += net->heart_beat_delay;
2213 		}
2214 		/*
2215 		 * Now we must convert the to_ticks that are now in ms to
2216 		 * ticks.
2217 		 */
2218 		to_ticks = MSEC_TO_TICKS(to_ticks);
2219 		break;
2220 	case SCTP_TIMER_TYPE_COOKIE:
2221 		/*
2222 		 * Here we can use the RTO timer from the network since one
2223 		 * RTT was complete. If a retransmission happened then we
2224 		 * will be using the RTO initial value.
2225 		 */
2226 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2227 #ifdef INVARIANTS
2228 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2229 			    t_type, inp, stcb, net);
2230 #else
2231 			return;
2232 #endif
2233 		}
2234 		tmr = &net->rxt_timer;
2235 		if (net->RTO == 0) {
2236 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2237 		} else {
2238 			to_ticks = MSEC_TO_TICKS(net->RTO);
2239 		}
2240 		break;
2241 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2242 		/*
2243 		 * Nothing needed but the endpoint here ususually about 60
2244 		 * minutes.
2245 		 */
2246 		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
2247 #ifdef INVARIANTS
2248 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2249 			    t_type, inp, stcb, net);
2250 #else
2251 			return;
2252 #endif
2253 		}
2254 		tmr = &inp->sctp_ep.signature_change;
2255 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2256 		break;
2257 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2258 		/*
2259 		 * Here we use the value found in the EP for PMTUD,
2260 		 * ususually about 10 minutes.
2261 		 */
2262 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2263 #ifdef INVARIANTS
2264 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2265 			    t_type, inp, stcb, net);
2266 #else
2267 			return;
2268 #endif
2269 		}
2270 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2271 			SCTPDBG(SCTP_DEBUG_TIMER2,
2272 			    "Timer type %d not started: inp=%p, stcb=%p, net=%p.\n",
2273 			    t_type, inp, stcb, net);
2274 			return;
2275 		}
2276 		tmr = &net->pmtu_timer;
2277 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2278 		break;
2279 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2280 		/* Here we use the RTO of the destination. */
2281 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2282 #ifdef INVARIANTS
2283 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2284 			    t_type, inp, stcb, net);
2285 #else
2286 			return;
2287 #endif
2288 		}
2289 		tmr = &net->rxt_timer;
2290 		if (net->RTO == 0) {
2291 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2292 		} else {
2293 			to_ticks = MSEC_TO_TICKS(net->RTO);
2294 		}
2295 		break;
2296 	case SCTP_TIMER_TYPE_ASCONF:
2297 		/*
2298 		 * Here the timer comes from the stcb but its value is from
2299 		 * the net's RTO.
2300 		 */
2301 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2302 #ifdef INVARIANTS
2303 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2304 			    t_type, inp, stcb, net);
2305 #else
2306 			return;
2307 #endif
2308 		}
2309 		tmr = &stcb->asoc.asconf_timer;
2310 		if (net->RTO == 0) {
2311 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2312 		} else {
2313 			to_ticks = MSEC_TO_TICKS(net->RTO);
2314 		}
2315 		break;
2316 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2317 		/*
2318 		 * Here we use the endpoints shutdown guard timer usually
2319 		 * about 3 minutes.
2320 		 */
2321 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2322 #ifdef INVARIANTS
2323 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2324 			    t_type, inp, stcb, net);
2325 #else
2326 			return;
2327 #endif
2328 		}
2329 		tmr = &stcb->asoc.shut_guard_timer;
2330 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2331 			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2332 		} else {
2333 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2334 		}
2335 		break;
2336 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2337 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2338 #ifdef INVARIANTS
2339 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2340 			    t_type, inp, stcb, net);
2341 #else
2342 			return;
2343 #endif
2344 		}
2345 		tmr = &stcb->asoc.autoclose_timer;
2346 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2347 		break;
2348 	case SCTP_TIMER_TYPE_STRRESET:
2349 		/*
2350 		 * Here the timer comes from the stcb but its value is from
2351 		 * the net's RTO.
2352 		 */
2353 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2354 #ifdef INVARIANTS
2355 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2356 			    t_type, inp, stcb, net);
2357 #else
2358 			return;
2359 #endif
2360 		}
2361 		tmr = &stcb->asoc.strreset_timer;
2362 		if (net->RTO == 0) {
2363 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2364 		} else {
2365 			to_ticks = MSEC_TO_TICKS(net->RTO);
2366 		}
2367 		break;
2368 	case SCTP_TIMER_TYPE_INPKILL:
2369 		/*
2370 		 * The inp is setup to die. We re-use the signature_chage
2371 		 * timer since that has stopped and we are in the GONE
2372 		 * state.
2373 		 */
2374 		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
2375 #ifdef INVARIANTS
2376 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2377 			    t_type, inp, stcb, net);
2378 #else
2379 			return;
2380 #endif
2381 		}
2382 		tmr = &inp->sctp_ep.signature_change;
2383 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2384 		break;
2385 	case SCTP_TIMER_TYPE_ASOCKILL:
2386 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2387 #ifdef INVARIANTS
2388 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2389 			    t_type, inp, stcb, net);
2390 #else
2391 			return;
2392 #endif
2393 		}
2394 		tmr = &stcb->asoc.strreset_timer;
2395 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2396 		break;
2397 	case SCTP_TIMER_TYPE_ADDR_WQ:
2398 		if ((inp != NULL) || (stcb != NULL) || (net != NULL)) {
2399 #ifdef INVARIANTS
2400 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2401 			    t_type, inp, stcb, net);
2402 #else
2403 			return;
2404 #endif
2405 		}
2406 		/* Only 1 tick away :-) */
2407 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2408 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
2409 		break;
2410 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2411 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2412 #ifdef INVARIANTS
2413 			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
2414 			    t_type, inp, stcb, net);
2415 #else
2416 			return;
2417 #endif
2418 		}
2419 		tmr = &stcb->asoc.delete_prim_timer;
2420 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2421 		break;
2422 	default:
2423 #ifdef INVARIANTS
2424 		panic("Unknown timer type %d", t_type);
2425 #else
2426 		return;
2427 #endif
2428 	}
2429 	KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type));
2430 	KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type));
2431 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2432 		/*
2433 		 * We do NOT allow you to have it already running. If it is,
2434 		 * we leave the current one up unchanged.
2435 		 */
2436 		SCTPDBG(SCTP_DEBUG_TIMER2,
2437 		    "Timer type %d already running: inp=%p, stcb=%p, net=%p.\n",
2438 		    t_type, inp, stcb, net);
2439 		return;
2440 	}
2441 	/* At this point we can proceed. */
2442 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2443 		stcb->asoc.num_send_timers_up++;
2444 	}
2445 	tmr->stopped_from = 0;
2446 	tmr->type = t_type;
2447 	tmr->ep = (void *)inp;
2448 	tmr->tcb = (void *)stcb;
2449 	if (t_type == SCTP_TIMER_TYPE_STRRESET) {
2450 		tmr->net = NULL;
2451 	} else {
2452 		tmr->net = (void *)net;
2453 	}
2454 	tmr->self = (void *)tmr;
2455 	tmr->vnet = (void *)curvnet;
2456 	tmr->ticks = sctp_get_tick_count();
2457 	if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) {
2458 		SCTPDBG(SCTP_DEBUG_TIMER2,
2459 		    "Timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n",
2460 		    t_type, to_ticks, inp, stcb, net);
2461 	} else {
2462 		/*
2463 		 * This should not happen, since we checked for pending
2464 		 * above.
2465 		 */
2466 		SCTPDBG(SCTP_DEBUG_TIMER2,
2467 		    "Timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n",
2468 		    t_type, to_ticks, inp, stcb, net);
2469 	}
2470 	return;
2471 }
2472 
2473 /*-
2474  * The following table shows which parameters must be provided
2475  * when calling sctp_timer_stop(). For parameters not being
2476  * provided, NULL must be used.
2477  *
2478  * |Name                         |inp |stcb|net |
2479  * |-----------------------------|----|----|----|
2480  * |SCTP_TIMER_TYPE_SEND         |Yes |Yes |Yes |
2481  * |SCTP_TIMER_TYPE_INIT         |Yes |Yes |Yes |
2482  * |SCTP_TIMER_TYPE_RECV         |Yes |Yes |No  |
2483  * |SCTP_TIMER_TYPE_SHUTDOWN     |Yes |Yes |Yes |
2484  * |SCTP_TIMER_TYPE_HEARTBEAT    |Yes |Yes |Yes |
2485  * |SCTP_TIMER_TYPE_COOKIE       |Yes |Yes |Yes |
2486  * |SCTP_TIMER_TYPE_NEWCOOKIE    |Yes |No  |No  |
2487  * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes |
2488  * |SCTP_TIMER_TYPE_SHUTDOWNACK  |Yes |Yes |Yes |
2489  * |SCTP_TIMER_TYPE_ASCONF       |Yes |Yes |No  |
2490  * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No  |
2491  * |SCTP_TIMER_TYPE_AUTOCLOSE    |Yes |Yes |No  |
2492  * |SCTP_TIMER_TYPE_STRRESET     |Yes |Yes |No  |
2493  * |SCTP_TIMER_TYPE_INPKILL      |Yes |No  |No  |
2494  * |SCTP_TIMER_TYPE_ASOCKILL     |Yes |Yes |No  |
2495  * |SCTP_TIMER_TYPE_ADDR_WQ      |No  |No  |No  |
2496  * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No  |
2497  *
2498  */
2499 
2500 void
2501 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2502     struct sctp_nets *net, uint32_t from)
2503 {
2504 	struct sctp_timer *tmr;
2505 
2506 	if (stcb != NULL) {
2507 		SCTP_TCB_LOCK_ASSERT(stcb);
2508 	} else if (inp != NULL) {
2509 		SCTP_INP_WLOCK_ASSERT(inp);
2510 	} else {
2511 		SCTP_WQ_ADDR_LOCK_ASSERT();
2512 	}
2513 	tmr = NULL;
2514 	switch (t_type) {
2515 	case SCTP_TIMER_TYPE_SEND:
2516 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2517 #ifdef INVARIANTS
2518 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2519 			    t_type, inp, stcb, net);
2520 #else
2521 			return;
2522 #endif
2523 		}
2524 		tmr = &net->rxt_timer;
2525 		break;
2526 	case SCTP_TIMER_TYPE_INIT:
2527 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2528 #ifdef INVARIANTS
2529 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2530 			    t_type, inp, stcb, net);
2531 #else
2532 			return;
2533 #endif
2534 		}
2535 		tmr = &net->rxt_timer;
2536 		break;
2537 	case SCTP_TIMER_TYPE_RECV:
2538 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2539 #ifdef INVARIANTS
2540 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2541 			    t_type, inp, stcb, net);
2542 #else
2543 			return;
2544 #endif
2545 		}
2546 		tmr = &stcb->asoc.dack_timer;
2547 		break;
2548 	case SCTP_TIMER_TYPE_SHUTDOWN:
2549 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2550 #ifdef INVARIANTS
2551 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2552 			    t_type, inp, stcb, net);
2553 #else
2554 			return;
2555 #endif
2556 		}
2557 		tmr = &net->rxt_timer;
2558 		break;
2559 	case SCTP_TIMER_TYPE_HEARTBEAT:
2560 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2561 #ifdef INVARIANTS
2562 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2563 			    t_type, inp, stcb, net);
2564 #else
2565 			return;
2566 #endif
2567 		}
2568 		tmr = &net->hb_timer;
2569 		break;
2570 	case SCTP_TIMER_TYPE_COOKIE:
2571 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2572 #ifdef INVARIANTS
2573 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2574 			    t_type, inp, stcb, net);
2575 #else
2576 			return;
2577 #endif
2578 		}
2579 		tmr = &net->rxt_timer;
2580 		break;
2581 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2582 		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
2583 #ifdef INVARIANTS
2584 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2585 			    t_type, inp, stcb, net);
2586 #else
2587 			return;
2588 #endif
2589 		}
2590 		tmr = &inp->sctp_ep.signature_change;
2591 		break;
2592 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2593 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2594 #ifdef INVARIANTS
2595 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2596 			    t_type, inp, stcb, net);
2597 #else
2598 			return;
2599 #endif
2600 		}
2601 		tmr = &net->pmtu_timer;
2602 		break;
2603 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2604 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
2605 #ifdef INVARIANTS
2606 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2607 			    t_type, inp, stcb, net);
2608 #else
2609 			return;
2610 #endif
2611 		}
2612 		tmr = &net->rxt_timer;
2613 		break;
2614 	case SCTP_TIMER_TYPE_ASCONF:
2615 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2616 #ifdef INVARIANTS
2617 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2618 			    t_type, inp, stcb, net);
2619 #else
2620 			return;
2621 #endif
2622 		}
2623 		tmr = &stcb->asoc.asconf_timer;
2624 		break;
2625 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2626 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2627 #ifdef INVARIANTS
2628 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2629 			    t_type, inp, stcb, net);
2630 #else
2631 			return;
2632 #endif
2633 		}
2634 		tmr = &stcb->asoc.shut_guard_timer;
2635 		break;
2636 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2637 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2638 #ifdef INVARIANTS
2639 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2640 			    t_type, inp, stcb, net);
2641 #else
2642 			return;
2643 #endif
2644 		}
2645 		tmr = &stcb->asoc.autoclose_timer;
2646 		break;
2647 	case SCTP_TIMER_TYPE_STRRESET:
2648 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2649 #ifdef INVARIANTS
2650 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2651 			    t_type, inp, stcb, net);
2652 #else
2653 			return;
2654 #endif
2655 		}
2656 		tmr = &stcb->asoc.strreset_timer;
2657 		break;
2658 	case SCTP_TIMER_TYPE_INPKILL:
2659 		/*
2660 		 * The inp is setup to die. We re-use the signature_chage
2661 		 * timer since that has stopped and we are in the GONE
2662 		 * state.
2663 		 */
2664 		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
2665 #ifdef INVARIANTS
2666 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2667 			    t_type, inp, stcb, net);
2668 #else
2669 			return;
2670 #endif
2671 		}
2672 		tmr = &inp->sctp_ep.signature_change;
2673 		break;
2674 	case SCTP_TIMER_TYPE_ASOCKILL:
2675 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2676 #ifdef INVARIANTS
2677 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2678 			    t_type, inp, stcb, net);
2679 #else
2680 			return;
2681 #endif
2682 		}
2683 		tmr = &stcb->asoc.strreset_timer;
2684 		break;
2685 	case SCTP_TIMER_TYPE_ADDR_WQ:
2686 		if ((inp != NULL) || (stcb != NULL) || (net != NULL)) {
2687 #ifdef INVARIANTS
2688 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2689 			    t_type, inp, stcb, net);
2690 #else
2691 			return;
2692 #endif
2693 		}
2694 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2695 		break;
2696 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2697 		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
2698 #ifdef INVARIANTS
2699 			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
2700 			    t_type, inp, stcb, net);
2701 #else
2702 			return;
2703 #endif
2704 		}
2705 		tmr = &stcb->asoc.delete_prim_timer;
2706 		break;
2707 	default:
2708 #ifdef INVARIANTS
2709 		panic("Unknown timer type %d", t_type);
2710 #else
2711 		return;
2712 #endif
2713 	}
2714 	KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type));
2715 	if ((tmr->type != SCTP_TIMER_TYPE_NONE) &&
2716 	    (tmr->type != t_type)) {
2717 		/*
2718 		 * Ok we have a timer that is under joint use. Cookie timer
2719 		 * per chance with the SEND timer. We therefore are NOT
2720 		 * running the timer that the caller wants stopped.  So just
2721 		 * return.
2722 		 */
2723 		SCTPDBG(SCTP_DEBUG_TIMER2,
2724 		    "Shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n",
2725 		    t_type, inp, stcb, net);
2726 		return;
2727 	}
2728 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2729 		stcb->asoc.num_send_timers_up--;
2730 		if (stcb->asoc.num_send_timers_up < 0) {
2731 			stcb->asoc.num_send_timers_up = 0;
2732 		}
2733 	}
2734 	tmr->self = NULL;
2735 	tmr->stopped_from = from;
2736 	if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) {
2737 		KASSERT(tmr->ep == inp,
2738 		    ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p",
2739 		    t_type, inp, tmr->ep));
2740 		KASSERT(tmr->tcb == stcb,
2741 		    ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p",
2742 		    t_type, stcb, tmr->tcb));
2743 		KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) ||
2744 		    ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)),
2745 		    ("sctp_timer_stop of type %d: net = %p, tmr->net = %p",
2746 		    t_type, net, tmr->net));
2747 		SCTPDBG(SCTP_DEBUG_TIMER2,
2748 		    "Timer type %d stopped: inp=%p, stcb=%p, net=%p.\n",
2749 		    t_type, inp, stcb, net);
2750 		tmr->ep = NULL;
2751 		tmr->tcb = NULL;
2752 		tmr->net = NULL;
2753 	} else {
2754 		SCTPDBG(SCTP_DEBUG_TIMER2,
2755 		    "Timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n",
2756 		    t_type, inp, stcb, net);
2757 	}
2758 	return;
2759 }
2760 
2761 uint32_t
2762 sctp_calculate_len(struct mbuf *m)
2763 {
2764 	uint32_t tlen = 0;
2765 	struct mbuf *at;
2766 
2767 	at = m;
2768 	while (at) {
2769 		tlen += SCTP_BUF_LEN(at);
2770 		at = SCTP_BUF_NEXT(at);
2771 	}
2772 	return (tlen);
2773 }
2774 
2775 void
2776 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2777     struct sctp_association *asoc, uint32_t mtu)
2778 {
2779 	/*
2780 	 * Reset the P-MTU size on this association, this involves changing
2781 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2782 	 * allow the DF flag to be cleared.
2783 	 */
2784 	struct sctp_tmit_chunk *chk;
2785 	unsigned int eff_mtu, ovh;
2786 
2787 	asoc->smallest_mtu = mtu;
2788 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2789 		ovh = SCTP_MIN_OVERHEAD;
2790 	} else {
2791 		ovh = SCTP_MIN_V4_OVERHEAD;
2792 	}
2793 	eff_mtu = mtu - ovh;
2794 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2795 		if (chk->send_size > eff_mtu) {
2796 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2797 		}
2798 	}
2799 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2800 		if (chk->send_size > eff_mtu) {
2801 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2802 		}
2803 	}
2804 }
2805 
2806 
2807 /*
2808  * Given an association and starting time of the current RTT period, update
2809  * RTO in number of msecs. net should point to the current network.
2810  * Return 1, if an RTO update was performed, return 0 if no update was
2811  * performed due to invalid starting point.
2812  */
2813 
2814 int
2815 sctp_calculate_rto(struct sctp_tcb *stcb,
2816     struct sctp_association *asoc,
2817     struct sctp_nets *net,
2818     struct timeval *old,
2819     int rtt_from_sack)
2820 {
2821 	struct timeval now;
2822 	uint64_t rtt_us;	/* RTT in us */
2823 	int32_t rtt;		/* RTT in ms */
2824 	uint32_t new_rto;
2825 	int first_measure = 0;
2826 
2827 	/************************/
2828 	/* 1. calculate new RTT */
2829 	/************************/
2830 	/* get the current time */
2831 	if (stcb->asoc.use_precise_time) {
2832 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2833 	} else {
2834 		(void)SCTP_GETTIME_TIMEVAL(&now);
2835 	}
2836 	if ((old->tv_sec > now.tv_sec) ||
2837 	    ((old->tv_sec == now.tv_sec) && (old->tv_sec > now.tv_sec))) {
2838 		/* The starting point is in the future. */
2839 		return (0);
2840 	}
2841 	timevalsub(&now, old);
2842 	rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec;
2843 	if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) {
2844 		/* The RTT is larger than a sane value. */
2845 		return (0);
2846 	}
2847 	/* store the current RTT in us */
2848 	net->rtt = rtt_us;
2849 	/* compute rtt in ms */
2850 	rtt = (int32_t)(net->rtt / 1000);
2851 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2852 		/*
2853 		 * Tell the CC module that a new update has just occurred
2854 		 * from a sack
2855 		 */
2856 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2857 	}
2858 	/*
2859 	 * Do we need to determine the lan? We do this only on sacks i.e.
2860 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2861 	 */
2862 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2863 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2864 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2865 			net->lan_type = SCTP_LAN_INTERNET;
2866 		} else {
2867 			net->lan_type = SCTP_LAN_LOCAL;
2868 		}
2869 	}
2870 
2871 	/***************************/
2872 	/* 2. update RTTVAR & SRTT */
2873 	/***************************/
2874 	/*-
2875 	 * Compute the scaled average lastsa and the
2876 	 * scaled variance lastsv as described in van Jacobson
2877 	 * Paper "Congestion Avoidance and Control", Annex A.
2878 	 *
2879 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2880 	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
2881 	 */
2882 	if (net->RTO_measured) {
2883 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2884 		net->lastsa += rtt;
2885 		if (rtt < 0) {
2886 			rtt = -rtt;
2887 		}
2888 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2889 		net->lastsv += rtt;
2890 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2891 			rto_logging(net, SCTP_LOG_RTTVAR);
2892 		}
2893 	} else {
2894 		/* First RTO measurment */
2895 		net->RTO_measured = 1;
2896 		first_measure = 1;
2897 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2898 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2899 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2900 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2901 		}
2902 	}
2903 	if (net->lastsv == 0) {
2904 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2905 	}
2906 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2907 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2908 	    (stcb->asoc.sat_network_lockout == 0)) {
2909 		stcb->asoc.sat_network = 1;
2910 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2911 		stcb->asoc.sat_network = 0;
2912 		stcb->asoc.sat_network_lockout = 1;
2913 	}
2914 	/* bound it, per C6/C7 in Section 5.3.1 */
2915 	if (new_rto < stcb->asoc.minrto) {
2916 		new_rto = stcb->asoc.minrto;
2917 	}
2918 	if (new_rto > stcb->asoc.maxrto) {
2919 		new_rto = stcb->asoc.maxrto;
2920 	}
2921 	net->RTO = new_rto;
2922 	return (1);
2923 }
2924 
2925 /*
2926  * return a pointer to a contiguous piece of data from the given mbuf chain
2927  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2928  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2929  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2930  */
2931 caddr_t
2932 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
2933 {
2934 	uint32_t count;
2935 	uint8_t *ptr;
2936 
2937 	ptr = in_ptr;
2938 	if ((off < 0) || (len <= 0))
2939 		return (NULL);
2940 
2941 	/* find the desired start location */
2942 	while ((m != NULL) && (off > 0)) {
2943 		if (off < SCTP_BUF_LEN(m))
2944 			break;
2945 		off -= SCTP_BUF_LEN(m);
2946 		m = SCTP_BUF_NEXT(m);
2947 	}
2948 	if (m == NULL)
2949 		return (NULL);
2950 
2951 	/* is the current mbuf large enough (eg. contiguous)? */
2952 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2953 		return (mtod(m, caddr_t)+off);
2954 	} else {
2955 		/* else, it spans more than one mbuf, so save a temp copy... */
2956 		while ((m != NULL) && (len > 0)) {
2957 			count = min(SCTP_BUF_LEN(m) - off, len);
2958 			memcpy(ptr, mtod(m, caddr_t)+off, count);
2959 			len -= count;
2960 			ptr += count;
2961 			off = 0;
2962 			m = SCTP_BUF_NEXT(m);
2963 		}
2964 		if ((m == NULL) && (len > 0))
2965 			return (NULL);
2966 		else
2967 			return ((caddr_t)in_ptr);
2968 	}
2969 }
2970 
2971 
2972 
2973 struct sctp_paramhdr *
2974 sctp_get_next_param(struct mbuf *m,
2975     int offset,
2976     struct sctp_paramhdr *pull,
2977     int pull_limit)
2978 {
2979 	/* This just provides a typed signature to Peter's Pull routine */
2980 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2981 	    (uint8_t *)pull));
2982 }
2983 
2984 
2985 struct mbuf *
2986 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2987 {
2988 	struct mbuf *m_last;
2989 	caddr_t dp;
2990 
2991 	if (padlen > 3) {
2992 		return (NULL);
2993 	}
2994 	if (padlen <= M_TRAILINGSPACE(m)) {
2995 		/*
2996 		 * The easy way. We hope the majority of the time we hit
2997 		 * here :)
2998 		 */
2999 		m_last = m;
3000 	} else {
3001 		/* Hard way we must grow the mbuf chain */
3002 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
3003 		if (m_last == NULL) {
3004 			return (NULL);
3005 		}
3006 		SCTP_BUF_LEN(m_last) = 0;
3007 		SCTP_BUF_NEXT(m_last) = NULL;
3008 		SCTP_BUF_NEXT(m) = m_last;
3009 	}
3010 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
3011 	SCTP_BUF_LEN(m_last) += padlen;
3012 	memset(dp, 0, padlen);
3013 	return (m_last);
3014 }
3015 
3016 struct mbuf *
3017 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
3018 {
3019 	/* find the last mbuf in chain and pad it */
3020 	struct mbuf *m_at;
3021 
3022 	if (last_mbuf != NULL) {
3023 		return (sctp_add_pad_tombuf(last_mbuf, padval));
3024 	} else {
3025 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3026 			if (SCTP_BUF_NEXT(m_at) == NULL) {
3027 				return (sctp_add_pad_tombuf(m_at, padval));
3028 			}
3029 		}
3030 	}
3031 	return (NULL);
3032 }
3033 
3034 static void
3035 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
3036     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
3037 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3038     SCTP_UNUSED
3039 #endif
3040 )
3041 {
3042 	struct mbuf *m_notify;
3043 	struct sctp_assoc_change *sac;
3044 	struct sctp_queued_to_read *control;
3045 	unsigned int notif_len;
3046 	uint16_t abort_len;
3047 	unsigned int i;
3048 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3049 	struct socket *so;
3050 #endif
3051 
3052 	if (stcb == NULL) {
3053 		return;
3054 	}
3055 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
3056 		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
3057 		if (abort != NULL) {
3058 			abort_len = ntohs(abort->ch.chunk_length);
3059 			/*
3060 			 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
3061 			 * contiguous.
3062 			 */
3063 			if (abort_len > SCTP_CHUNK_BUFFER_SIZE) {
3064 				abort_len = SCTP_CHUNK_BUFFER_SIZE;
3065 			}
3066 		} else {
3067 			abort_len = 0;
3068 		}
3069 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
3070 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
3071 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
3072 			notif_len += abort_len;
3073 		}
3074 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3075 		if (m_notify == NULL) {
3076 			/* Retry with smaller value. */
3077 			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
3078 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3079 			if (m_notify == NULL) {
3080 				goto set_error;
3081 			}
3082 		}
3083 		SCTP_BUF_NEXT(m_notify) = NULL;
3084 		sac = mtod(m_notify, struct sctp_assoc_change *);
3085 		memset(sac, 0, notif_len);
3086 		sac->sac_type = SCTP_ASSOC_CHANGE;
3087 		sac->sac_flags = 0;
3088 		sac->sac_length = sizeof(struct sctp_assoc_change);
3089 		sac->sac_state = state;
3090 		sac->sac_error = error;
3091 		/* XXX verify these stream counts */
3092 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
3093 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
3094 		sac->sac_assoc_id = sctp_get_associd(stcb);
3095 		if (notif_len > sizeof(struct sctp_assoc_change)) {
3096 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
3097 				i = 0;
3098 				if (stcb->asoc.prsctp_supported == 1) {
3099 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
3100 				}
3101 				if (stcb->asoc.auth_supported == 1) {
3102 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
3103 				}
3104 				if (stcb->asoc.asconf_supported == 1) {
3105 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
3106 				}
3107 				if (stcb->asoc.idata_supported == 1) {
3108 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
3109 				}
3110 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
3111 				if (stcb->asoc.reconfig_supported == 1) {
3112 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
3113 				}
3114 				sac->sac_length += i;
3115 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
3116 				memcpy(sac->sac_info, abort, abort_len);
3117 				sac->sac_length += abort_len;
3118 			}
3119 		}
3120 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
3121 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3122 		    0, 0, stcb->asoc.context, 0, 0, 0,
3123 		    m_notify);
3124 		if (control != NULL) {
3125 			control->length = SCTP_BUF_LEN(m_notify);
3126 			control->spec_flags = M_NOTIFICATION;
3127 			/* not that we need this */
3128 			control->tail_mbuf = m_notify;
3129 			sctp_add_to_readq(stcb->sctp_ep, stcb,
3130 			    control,
3131 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
3132 			    so_locked);
3133 		} else {
3134 			sctp_m_freem(m_notify);
3135 		}
3136 	}
3137 	/*
3138 	 * For 1-to-1 style sockets, we send up and error when an ABORT
3139 	 * comes in.
3140 	 */
3141 set_error:
3142 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3143 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
3144 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
3145 		SOCK_LOCK(stcb->sctp_socket);
3146 		if (from_peer) {
3147 			if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
3148 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
3149 				stcb->sctp_socket->so_error = ECONNREFUSED;
3150 			} else {
3151 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
3152 				stcb->sctp_socket->so_error = ECONNRESET;
3153 			}
3154 		} else {
3155 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
3156 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
3157 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
3158 				stcb->sctp_socket->so_error = ETIMEDOUT;
3159 			} else {
3160 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
3161 				stcb->sctp_socket->so_error = ECONNABORTED;
3162 			}
3163 		}
3164 		SOCK_UNLOCK(stcb->sctp_socket);
3165 	}
3166 	/* Wake ANY sleepers */
3167 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3168 	so = SCTP_INP_SO(stcb->sctp_ep);
3169 	if (!so_locked) {
3170 		atomic_add_int(&stcb->asoc.refcnt, 1);
3171 		SCTP_TCB_UNLOCK(stcb);
3172 		SCTP_SOCKET_LOCK(so, 1);
3173 		SCTP_TCB_LOCK(stcb);
3174 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3175 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3176 			SCTP_SOCKET_UNLOCK(so, 1);
3177 			return;
3178 		}
3179 	}
3180 #endif
3181 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3182 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
3183 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
3184 		socantrcvmore(stcb->sctp_socket);
3185 	}
3186 	sorwakeup(stcb->sctp_socket);
3187 	sowwakeup(stcb->sctp_socket);
3188 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3189 	if (!so_locked) {
3190 		SCTP_SOCKET_UNLOCK(so, 1);
3191 	}
3192 #endif
3193 }
3194 
3195 static void
3196 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
3197     struct sockaddr *sa, uint32_t error, int so_locked
3198 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3199     SCTP_UNUSED
3200 #endif
3201 )
3202 {
3203 	struct mbuf *m_notify;
3204 	struct sctp_paddr_change *spc;
3205 	struct sctp_queued_to_read *control;
3206 
3207 	if ((stcb == NULL) ||
3208 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
3209 		/* event not enabled */
3210 		return;
3211 	}
3212 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
3213 	if (m_notify == NULL)
3214 		return;
3215 	SCTP_BUF_LEN(m_notify) = 0;
3216 	spc = mtod(m_notify, struct sctp_paddr_change *);
3217 	memset(spc, 0, sizeof(struct sctp_paddr_change));
3218 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
3219 	spc->spc_flags = 0;
3220 	spc->spc_length = sizeof(struct sctp_paddr_change);
3221 	switch (sa->sa_family) {
3222 #ifdef INET
3223 	case AF_INET:
3224 #ifdef INET6
3225 		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
3226 			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
3227 			    (struct sockaddr_in6 *)&spc->spc_aaddr);
3228 		} else {
3229 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
3230 		}
3231 #else
3232 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
3233 #endif
3234 		break;
3235 #endif
3236 #ifdef INET6
3237 	case AF_INET6:
3238 		{
3239 			struct sockaddr_in6 *sin6;
3240 
3241 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
3242 
3243 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
3244 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
3245 				if (sin6->sin6_scope_id == 0) {
3246 					/* recover scope_id for user */
3247 					(void)sa6_recoverscope(sin6);
3248 				} else {
3249 					/* clear embedded scope_id for user */
3250 					in6_clearscope(&sin6->sin6_addr);
3251 				}
3252 			}
3253 			break;
3254 		}
3255 #endif
3256 	default:
3257 		/* TSNH */
3258 		break;
3259 	}
3260 	spc->spc_state = state;
3261 	spc->spc_error = error;
3262 	spc->spc_assoc_id = sctp_get_associd(stcb);
3263 
3264 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
3265 	SCTP_BUF_NEXT(m_notify) = NULL;
3266 
3267 	/* append to socket */
3268 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3269 	    0, 0, stcb->asoc.context, 0, 0, 0,
3270 	    m_notify);
3271 	if (control == NULL) {
3272 		/* no memory */
3273 		sctp_m_freem(m_notify);
3274 		return;
3275 	}
3276 	control->length = SCTP_BUF_LEN(m_notify);
3277 	control->spec_flags = M_NOTIFICATION;
3278 	/* not that we need this */
3279 	control->tail_mbuf = m_notify;
3280 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3281 	    control,
3282 	    &stcb->sctp_socket->so_rcv, 1,
3283 	    SCTP_READ_LOCK_NOT_HELD,
3284 	    so_locked);
3285 }
3286 
3287 
3288 static void
3289 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
3290     struct sctp_tmit_chunk *chk, int so_locked
3291 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3292     SCTP_UNUSED
3293 #endif
3294 )
3295 {
3296 	struct mbuf *m_notify;
3297 	struct sctp_send_failed *ssf;
3298 	struct sctp_send_failed_event *ssfe;
3299 	struct sctp_queued_to_read *control;
3300 	struct sctp_chunkhdr *chkhdr;
3301 	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;
3302 
3303 	if ((stcb == NULL) ||
3304 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3305 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3306 		/* event not enabled */
3307 		return;
3308 	}
3309 
3310 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3311 		notifhdr_len = sizeof(struct sctp_send_failed_event);
3312 	} else {
3313 		notifhdr_len = sizeof(struct sctp_send_failed);
3314 	}
3315 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3316 	if (m_notify == NULL)
3317 		/* no space left */
3318 		return;
3319 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3320 	if (stcb->asoc.idata_supported) {
3321 		chkhdr_len = sizeof(struct sctp_idata_chunk);
3322 	} else {
3323 		chkhdr_len = sizeof(struct sctp_data_chunk);
3324 	}
3325 	/* Use some defaults in case we can't access the chunk header */
3326 	if (chk->send_size >= chkhdr_len) {
3327 		payload_len = chk->send_size - chkhdr_len;
3328 	} else {
3329 		payload_len = 0;
3330 	}
3331 	padding_len = 0;
3332 	if (chk->data != NULL) {
3333 		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
3334 		if (chkhdr != NULL) {
3335 			chk_len = ntohs(chkhdr->chunk_length);
3336 			if ((chk_len >= chkhdr_len) &&
3337 			    (chk->send_size >= chk_len) &&
3338 			    (chk->send_size - chk_len < 4)) {
3339 				padding_len = chk->send_size - chk_len;
3340 				payload_len = chk->send_size - chkhdr_len - padding_len;
3341 			}
3342 		}
3343 	}
3344 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3345 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3346 		memset(ssfe, 0, notifhdr_len);
3347 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3348 		if (sent) {
3349 			ssfe->ssfe_flags = SCTP_DATA_SENT;
3350 		} else {
3351 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3352 		}
3353 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
3354 		ssfe->ssfe_error = error;
3355 		/* not exactly what the user sent in, but should be close :) */
3356 		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
3357 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
3358 		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
3359 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
3360 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3361 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3362 	} else {
3363 		ssf = mtod(m_notify, struct sctp_send_failed *);
3364 		memset(ssf, 0, notifhdr_len);
3365 		ssf->ssf_type = SCTP_SEND_FAILED;
3366 		if (sent) {
3367 			ssf->ssf_flags = SCTP_DATA_SENT;
3368 		} else {
3369 			ssf->ssf_flags = SCTP_DATA_UNSENT;
3370 		}
3371 		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
3372 		ssf->ssf_error = error;
3373 		/* not exactly what the user sent in, but should be close :) */
3374 		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
3375 		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
3376 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
3377 		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
3378 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
3379 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3380 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3381 	}
3382 	if (chk->data != NULL) {
3383 		/* Trim off the sctp chunk header (it should be there) */
3384 		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
3385 			m_adj(chk->data, chkhdr_len);
3386 			m_adj(chk->data, -padding_len);
3387 			sctp_mbuf_crush(chk->data);
3388 			chk->send_size -= (chkhdr_len + padding_len);
3389 		}
3390 	}
3391 	SCTP_BUF_NEXT(m_notify) = chk->data;
3392 	/* Steal off the mbuf */
3393 	chk->data = NULL;
3394 	/*
3395 	 * For this case, we check the actual socket buffer, since the assoc
3396 	 * is going away we don't want to overfill the socket buffer for a
3397 	 * non-reader
3398 	 */
3399 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3400 		sctp_m_freem(m_notify);
3401 		return;
3402 	}
3403 	/* append to socket */
3404 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3405 	    0, 0, stcb->asoc.context, 0, 0, 0,
3406 	    m_notify);
3407 	if (control == NULL) {
3408 		/* no memory */
3409 		sctp_m_freem(m_notify);
3410 		return;
3411 	}
3412 	control->length = SCTP_BUF_LEN(m_notify);
3413 	control->spec_flags = M_NOTIFICATION;
3414 	/* not that we need this */
3415 	control->tail_mbuf = m_notify;
3416 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3417 	    control,
3418 	    &stcb->sctp_socket->so_rcv, 1,
3419 	    SCTP_READ_LOCK_NOT_HELD,
3420 	    so_locked);
3421 }
3422 
3423 
3424 static void
3425 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3426     struct sctp_stream_queue_pending *sp, int so_locked
3427 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3428     SCTP_UNUSED
3429 #endif
3430 )
3431 {
3432 	struct mbuf *m_notify;
3433 	struct sctp_send_failed *ssf;
3434 	struct sctp_send_failed_event *ssfe;
3435 	struct sctp_queued_to_read *control;
3436 	int notifhdr_len;
3437 
3438 	if ((stcb == NULL) ||
3439 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3440 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3441 		/* event not enabled */
3442 		return;
3443 	}
3444 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3445 		notifhdr_len = sizeof(struct sctp_send_failed_event);
3446 	} else {
3447 		notifhdr_len = sizeof(struct sctp_send_failed);
3448 	}
3449 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3450 	if (m_notify == NULL) {
3451 		/* no space left */
3452 		return;
3453 	}
3454 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3455 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3456 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3457 		memset(ssfe, 0, notifhdr_len);
3458 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3459 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3460 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
3461 		ssfe->ssfe_error = error;
3462 		/* not exactly what the user sent in, but should be close :) */
3463 		ssfe->ssfe_info.snd_sid = sp->sid;
3464 		if (sp->some_taken) {
3465 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3466 		} else {
3467 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3468 		}
3469 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3470 		ssfe->ssfe_info.snd_context = sp->context;
3471 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3472 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3473 	} else {
3474 		ssf = mtod(m_notify, struct sctp_send_failed *);
3475 		memset(ssf, 0, notifhdr_len);
3476 		ssf->ssf_type = SCTP_SEND_FAILED;
3477 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3478 		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
3479 		ssf->ssf_error = error;
3480 		/* not exactly what the user sent in, but should be close :) */
3481 		ssf->ssf_info.sinfo_stream = sp->sid;
3482 		ssf->ssf_info.sinfo_ssn = 0;
3483 		if (sp->some_taken) {
3484 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3485 		} else {
3486 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3487 		}
3488 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3489 		ssf->ssf_info.sinfo_context = sp->context;
3490 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3491 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3492 	}
3493 	SCTP_BUF_NEXT(m_notify) = sp->data;
3494 
3495 	/* Steal off the mbuf */
3496 	sp->data = NULL;
3497 	/*
3498 	 * For this case, we check the actual socket buffer, since the assoc
3499 	 * is going away we don't want to overfill the socket buffer for a
3500 	 * non-reader
3501 	 */
3502 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3503 		sctp_m_freem(m_notify);
3504 		return;
3505 	}
3506 	/* append to socket */
3507 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3508 	    0, 0, stcb->asoc.context, 0, 0, 0,
3509 	    m_notify);
3510 	if (control == NULL) {
3511 		/* no memory */
3512 		sctp_m_freem(m_notify);
3513 		return;
3514 	}
3515 	control->length = SCTP_BUF_LEN(m_notify);
3516 	control->spec_flags = M_NOTIFICATION;
3517 	/* not that we need this */
3518 	control->tail_mbuf = m_notify;
3519 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3520 	    control,
3521 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3522 }
3523 
3524 
3525 
3526 static void
3527 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3528 {
3529 	struct mbuf *m_notify;
3530 	struct sctp_adaptation_event *sai;
3531 	struct sctp_queued_to_read *control;
3532 
3533 	if ((stcb == NULL) ||
3534 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3535 		/* event not enabled */
3536 		return;
3537 	}
3538 
3539 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3540 	if (m_notify == NULL)
3541 		/* no space left */
3542 		return;
3543 	SCTP_BUF_LEN(m_notify) = 0;
3544 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3545 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3546 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3547 	sai->sai_flags = 0;
3548 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3549 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3550 	sai->sai_assoc_id = sctp_get_associd(stcb);
3551 
3552 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3553 	SCTP_BUF_NEXT(m_notify) = NULL;
3554 
3555 	/* append to socket */
3556 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3557 	    0, 0, stcb->asoc.context, 0, 0, 0,
3558 	    m_notify);
3559 	if (control == NULL) {
3560 		/* no memory */
3561 		sctp_m_freem(m_notify);
3562 		return;
3563 	}
3564 	control->length = SCTP_BUF_LEN(m_notify);
3565 	control->spec_flags = M_NOTIFICATION;
3566 	/* not that we need this */
3567 	control->tail_mbuf = m_notify;
3568 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3569 	    control,
3570 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3571 }
3572 
3573 /* This always must be called with the read-queue LOCKED in the INP */
3574 static void
3575 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3576     uint32_t val, int so_locked
3577 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3578     SCTP_UNUSED
3579 #endif
3580 )
3581 {
3582 	struct mbuf *m_notify;
3583 	struct sctp_pdapi_event *pdapi;
3584 	struct sctp_queued_to_read *control;
3585 	struct sockbuf *sb;
3586 
3587 	if ((stcb == NULL) ||
3588 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3589 		/* event not enabled */
3590 		return;
3591 	}
3592 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3593 		return;
3594 	}
3595 
3596 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3597 	if (m_notify == NULL)
3598 		/* no space left */
3599 		return;
3600 	SCTP_BUF_LEN(m_notify) = 0;
3601 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3602 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3603 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3604 	pdapi->pdapi_flags = 0;
3605 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3606 	pdapi->pdapi_indication = error;
3607 	pdapi->pdapi_stream = (val >> 16);
3608 	pdapi->pdapi_seq = (val & 0x0000ffff);
3609 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3610 
3611 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3612 	SCTP_BUF_NEXT(m_notify) = NULL;
3613 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3614 	    0, 0, stcb->asoc.context, 0, 0, 0,
3615 	    m_notify);
3616 	if (control == NULL) {
3617 		/* no memory */
3618 		sctp_m_freem(m_notify);
3619 		return;
3620 	}
3621 	control->length = SCTP_BUF_LEN(m_notify);
3622 	control->spec_flags = M_NOTIFICATION;
3623 	/* not that we need this */
3624 	control->tail_mbuf = m_notify;
3625 	sb = &stcb->sctp_socket->so_rcv;
3626 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3627 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3628 	}
3629 	sctp_sballoc(stcb, sb, m_notify);
3630 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3631 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3632 	}
3633 	control->end_added = 1;
3634 	if (stcb->asoc.control_pdapi)
3635 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3636 	else {
3637 		/* we really should not see this case */
3638 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3639 	}
3640 	if (stcb->sctp_ep && stcb->sctp_socket) {
3641 		/* This should always be the case */
3642 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3643 		struct socket *so;
3644 
3645 		so = SCTP_INP_SO(stcb->sctp_ep);
3646 		if (!so_locked) {
3647 			atomic_add_int(&stcb->asoc.refcnt, 1);
3648 			SCTP_TCB_UNLOCK(stcb);
3649 			SCTP_SOCKET_LOCK(so, 1);
3650 			SCTP_TCB_LOCK(stcb);
3651 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3652 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3653 				SCTP_SOCKET_UNLOCK(so, 1);
3654 				return;
3655 			}
3656 		}
3657 #endif
3658 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3659 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3660 		if (!so_locked) {
3661 			SCTP_SOCKET_UNLOCK(so, 1);
3662 		}
3663 #endif
3664 	}
3665 }
3666 
3667 static void
3668 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3669 {
3670 	struct mbuf *m_notify;
3671 	struct sctp_shutdown_event *sse;
3672 	struct sctp_queued_to_read *control;
3673 
3674 	/*
3675 	 * For TCP model AND UDP connected sockets we will send an error up
3676 	 * when an SHUTDOWN completes
3677 	 */
3678 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3679 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3680 		/* mark socket closed for read/write and wakeup! */
3681 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3682 		struct socket *so;
3683 
3684 		so = SCTP_INP_SO(stcb->sctp_ep);
3685 		atomic_add_int(&stcb->asoc.refcnt, 1);
3686 		SCTP_TCB_UNLOCK(stcb);
3687 		SCTP_SOCKET_LOCK(so, 1);
3688 		SCTP_TCB_LOCK(stcb);
3689 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3690 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3691 			SCTP_SOCKET_UNLOCK(so, 1);
3692 			return;
3693 		}
3694 #endif
3695 		socantsendmore(stcb->sctp_socket);
3696 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3697 		SCTP_SOCKET_UNLOCK(so, 1);
3698 #endif
3699 	}
3700 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3701 		/* event not enabled */
3702 		return;
3703 	}
3704 
3705 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3706 	if (m_notify == NULL)
3707 		/* no space left */
3708 		return;
3709 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3710 	memset(sse, 0, sizeof(struct sctp_shutdown_event));
3711 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3712 	sse->sse_flags = 0;
3713 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3714 	sse->sse_assoc_id = sctp_get_associd(stcb);
3715 
3716 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3717 	SCTP_BUF_NEXT(m_notify) = NULL;
3718 
3719 	/* append to socket */
3720 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3721 	    0, 0, stcb->asoc.context, 0, 0, 0,
3722 	    m_notify);
3723 	if (control == NULL) {
3724 		/* no memory */
3725 		sctp_m_freem(m_notify);
3726 		return;
3727 	}
3728 	control->length = SCTP_BUF_LEN(m_notify);
3729 	control->spec_flags = M_NOTIFICATION;
3730 	/* not that we need this */
3731 	control->tail_mbuf = m_notify;
3732 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3733 	    control,
3734 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3735 }
3736 
3737 static void
3738 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3739     int so_locked
3740 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3741     SCTP_UNUSED
3742 #endif
3743 )
3744 {
3745 	struct mbuf *m_notify;
3746 	struct sctp_sender_dry_event *event;
3747 	struct sctp_queued_to_read *control;
3748 
3749 	if ((stcb == NULL) ||
3750 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3751 		/* event not enabled */
3752 		return;
3753 	}
3754 
3755 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3756 	if (m_notify == NULL) {
3757 		/* no space left */
3758 		return;
3759 	}
3760 	SCTP_BUF_LEN(m_notify) = 0;
3761 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3762 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3763 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3764 	event->sender_dry_flags = 0;
3765 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3766 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3767 
3768 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3769 	SCTP_BUF_NEXT(m_notify) = NULL;
3770 
3771 	/* append to socket */
3772 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3773 	    0, 0, stcb->asoc.context, 0, 0, 0,
3774 	    m_notify);
3775 	if (control == NULL) {
3776 		/* no memory */
3777 		sctp_m_freem(m_notify);
3778 		return;
3779 	}
3780 	control->length = SCTP_BUF_LEN(m_notify);
3781 	control->spec_flags = M_NOTIFICATION;
3782 	/* not that we need this */
3783 	control->tail_mbuf = m_notify;
3784 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3785 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3786 }
3787 
3788 
3789 void
3790 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3791 {
3792 	struct mbuf *m_notify;
3793 	struct sctp_queued_to_read *control;
3794 	struct sctp_stream_change_event *stradd;
3795 
3796 	if ((stcb == NULL) ||
3797 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3798 		/* event not enabled */
3799 		return;
3800 	}
3801 	if ((stcb->asoc.peer_req_out) && flag) {
3802 		/* Peer made the request, don't tell the local user */
3803 		stcb->asoc.peer_req_out = 0;
3804 		return;
3805 	}
3806 	stcb->asoc.peer_req_out = 0;
3807 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3808 	if (m_notify == NULL)
3809 		/* no space left */
3810 		return;
3811 	SCTP_BUF_LEN(m_notify) = 0;
3812 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3813 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3814 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3815 	stradd->strchange_flags = flag;
3816 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3817 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3818 	stradd->strchange_instrms = numberin;
3819 	stradd->strchange_outstrms = numberout;
3820 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3821 	SCTP_BUF_NEXT(m_notify) = NULL;
3822 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3823 		/* no space */
3824 		sctp_m_freem(m_notify);
3825 		return;
3826 	}
3827 	/* append to socket */
3828 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3829 	    0, 0, stcb->asoc.context, 0, 0, 0,
3830 	    m_notify);
3831 	if (control == NULL) {
3832 		/* no memory */
3833 		sctp_m_freem(m_notify);
3834 		return;
3835 	}
3836 	control->length = SCTP_BUF_LEN(m_notify);
3837 	control->spec_flags = M_NOTIFICATION;
3838 	/* not that we need this */
3839 	control->tail_mbuf = m_notify;
3840 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3841 	    control,
3842 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3843 }
3844 
3845 void
3846 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3847 {
3848 	struct mbuf *m_notify;
3849 	struct sctp_queued_to_read *control;
3850 	struct sctp_assoc_reset_event *strasoc;
3851 
3852 	if ((stcb == NULL) ||
3853 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3854 		/* event not enabled */
3855 		return;
3856 	}
3857 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3858 	if (m_notify == NULL)
3859 		/* no space left */
3860 		return;
3861 	SCTP_BUF_LEN(m_notify) = 0;
3862 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3863 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3864 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3865 	strasoc->assocreset_flags = flag;
3866 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3867 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3868 	strasoc->assocreset_local_tsn = sending_tsn;
3869 	strasoc->assocreset_remote_tsn = recv_tsn;
3870 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3871 	SCTP_BUF_NEXT(m_notify) = NULL;
3872 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3873 		/* no space */
3874 		sctp_m_freem(m_notify);
3875 		return;
3876 	}
3877 	/* append to socket */
3878 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3879 	    0, 0, stcb->asoc.context, 0, 0, 0,
3880 	    m_notify);
3881 	if (control == NULL) {
3882 		/* no memory */
3883 		sctp_m_freem(m_notify);
3884 		return;
3885 	}
3886 	control->length = SCTP_BUF_LEN(m_notify);
3887 	control->spec_flags = M_NOTIFICATION;
3888 	/* not that we need this */
3889 	control->tail_mbuf = m_notify;
3890 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3891 	    control,
3892 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3893 }
3894 
3895 
3896 
3897 static void
3898 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3899     int number_entries, uint16_t *list, int flag)
3900 {
3901 	struct mbuf *m_notify;
3902 	struct sctp_queued_to_read *control;
3903 	struct sctp_stream_reset_event *strreset;
3904 	int len;
3905 
3906 	if ((stcb == NULL) ||
3907 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3908 		/* event not enabled */
3909 		return;
3910 	}
3911 
3912 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3913 	if (m_notify == NULL)
3914 		/* no space left */
3915 		return;
3916 	SCTP_BUF_LEN(m_notify) = 0;
3917 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3918 	if (len > M_TRAILINGSPACE(m_notify)) {
3919 		/* never enough room */
3920 		sctp_m_freem(m_notify);
3921 		return;
3922 	}
3923 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3924 	memset(strreset, 0, len);
3925 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3926 	strreset->strreset_flags = flag;
3927 	strreset->strreset_length = len;
3928 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3929 	if (number_entries) {
3930 		int i;
3931 
3932 		for (i = 0; i < number_entries; i++) {
3933 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3934 		}
3935 	}
3936 	SCTP_BUF_LEN(m_notify) = len;
3937 	SCTP_BUF_NEXT(m_notify) = NULL;
3938 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3939 		/* no space */
3940 		sctp_m_freem(m_notify);
3941 		return;
3942 	}
3943 	/* append to socket */
3944 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3945 	    0, 0, stcb->asoc.context, 0, 0, 0,
3946 	    m_notify);
3947 	if (control == NULL) {
3948 		/* no memory */
3949 		sctp_m_freem(m_notify);
3950 		return;
3951 	}
3952 	control->length = SCTP_BUF_LEN(m_notify);
3953 	control->spec_flags = M_NOTIFICATION;
3954 	/* not that we need this */
3955 	control->tail_mbuf = m_notify;
3956 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3957 	    control,
3958 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3959 }
3960 
3961 
3962 static void
3963 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3964 {
3965 	struct mbuf *m_notify;
3966 	struct sctp_remote_error *sre;
3967 	struct sctp_queued_to_read *control;
3968 	unsigned int notif_len;
3969 	uint16_t chunk_len;
3970 
3971 	if ((stcb == NULL) ||
3972 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3973 		return;
3974 	}
3975 	if (chunk != NULL) {
3976 		chunk_len = ntohs(chunk->ch.chunk_length);
3977 		/*
3978 		 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
3979 		 * contiguous.
3980 		 */
3981 		if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) {
3982 			chunk_len = SCTP_CHUNK_BUFFER_SIZE;
3983 		}
3984 	} else {
3985 		chunk_len = 0;
3986 	}
3987 	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
3988 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3989 	if (m_notify == NULL) {
3990 		/* Retry with smaller value. */
3991 		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
3992 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3993 		if (m_notify == NULL) {
3994 			return;
3995 		}
3996 	}
3997 	SCTP_BUF_NEXT(m_notify) = NULL;
3998 	sre = mtod(m_notify, struct sctp_remote_error *);
3999 	memset(sre, 0, notif_len);
4000 	sre->sre_type = SCTP_REMOTE_ERROR;
4001 	sre->sre_flags = 0;
4002 	sre->sre_length = sizeof(struct sctp_remote_error);
4003 	sre->sre_error = error;
4004 	sre->sre_assoc_id = sctp_get_associd(stcb);
4005 	if (notif_len > sizeof(struct sctp_remote_error)) {
4006 		memcpy(sre->sre_data, chunk, chunk_len);
4007 		sre->sre_length += chunk_len;
4008 	}
4009 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
4010 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
4011 	    0, 0, stcb->asoc.context, 0, 0, 0,
4012 	    m_notify);
4013 	if (control != NULL) {
4014 		control->length = SCTP_BUF_LEN(m_notify);
4015 		control->spec_flags = M_NOTIFICATION;
4016 		/* not that we need this */
4017 		control->tail_mbuf = m_notify;
4018 		sctp_add_to_readq(stcb->sctp_ep, stcb,
4019 		    control,
4020 		    &stcb->sctp_socket->so_rcv, 1,
4021 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
4022 	} else {
4023 		sctp_m_freem(m_notify);
4024 	}
4025 }
4026 
4027 
4028 void
4029 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
4030     uint32_t error, void *data, int so_locked
4031 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4032     SCTP_UNUSED
4033 #endif
4034 )
4035 {
4036 	if ((stcb == NULL) ||
4037 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4038 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4039 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
4040 		/* If the socket is gone we are out of here */
4041 		return;
4042 	}
4043 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
4044 		return;
4045 	}
4046 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
4047 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
4048 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
4049 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
4050 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
4051 			/* Don't report these in front states */
4052 			return;
4053 		}
4054 	}
4055 	switch (notification) {
4056 	case SCTP_NOTIFY_ASSOC_UP:
4057 		if (stcb->asoc.assoc_up_sent == 0) {
4058 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
4059 			stcb->asoc.assoc_up_sent = 1;
4060 		}
4061 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
4062 			sctp_notify_adaptation_layer(stcb);
4063 		}
4064 		if (stcb->asoc.auth_supported == 0) {
4065 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
4066 			    NULL, so_locked);
4067 		}
4068 		break;
4069 	case SCTP_NOTIFY_ASSOC_DOWN:
4070 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
4071 		break;
4072 	case SCTP_NOTIFY_INTERFACE_DOWN:
4073 		{
4074 			struct sctp_nets *net;
4075 
4076 			net = (struct sctp_nets *)data;
4077 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
4078 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
4079 			break;
4080 		}
4081 	case SCTP_NOTIFY_INTERFACE_UP:
4082 		{
4083 			struct sctp_nets *net;
4084 
4085 			net = (struct sctp_nets *)data;
4086 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
4087 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
4088 			break;
4089 		}
4090 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
4091 		{
4092 			struct sctp_nets *net;
4093 
4094 			net = (struct sctp_nets *)data;
4095 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
4096 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
4097 			break;
4098 		}
4099 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
4100 		sctp_notify_send_failed2(stcb, error,
4101 		    (struct sctp_stream_queue_pending *)data, so_locked);
4102 		break;
4103 	case SCTP_NOTIFY_SENT_DG_FAIL:
4104 		sctp_notify_send_failed(stcb, 1, error,
4105 		    (struct sctp_tmit_chunk *)data, so_locked);
4106 		break;
4107 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
4108 		sctp_notify_send_failed(stcb, 0, error,
4109 		    (struct sctp_tmit_chunk *)data, so_locked);
4110 		break;
4111 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
4112 		{
4113 			uint32_t val;
4114 
4115 			val = *((uint32_t *)data);
4116 
4117 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
4118 			break;
4119 		}
4120 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
4121 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
4122 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
4123 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
4124 		} else {
4125 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
4126 		}
4127 		break;
4128 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
4129 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
4130 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
4131 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
4132 		} else {
4133 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
4134 		}
4135 		break;
4136 	case SCTP_NOTIFY_ASSOC_RESTART:
4137 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
4138 		if (stcb->asoc.auth_supported == 0) {
4139 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
4140 			    NULL, so_locked);
4141 		}
4142 		break;
4143 	case SCTP_NOTIFY_STR_RESET_SEND:
4144 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
4145 		break;
4146 	case SCTP_NOTIFY_STR_RESET_RECV:
4147 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
4148 		break;
4149 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
4150 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
4151 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
4152 		break;
4153 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
4154 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
4155 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
4156 		break;
4157 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
4158 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
4159 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
4160 		break;
4161 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
4162 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
4163 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
4164 		break;
4165 	case SCTP_NOTIFY_ASCONF_ADD_IP:
4166 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
4167 		    error, so_locked);
4168 		break;
4169 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
4170 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
4171 		    error, so_locked);
4172 		break;
4173 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
4174 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
4175 		    error, so_locked);
4176 		break;
4177 	case SCTP_NOTIFY_PEER_SHUTDOWN:
4178 		sctp_notify_shutdown_event(stcb);
4179 		break;
4180 	case SCTP_NOTIFY_AUTH_NEW_KEY:
4181 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
4182 		    (uint16_t)(uintptr_t)data,
4183 		    so_locked);
4184 		break;
4185 	case SCTP_NOTIFY_AUTH_FREE_KEY:
4186 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
4187 		    (uint16_t)(uintptr_t)data,
4188 		    so_locked);
4189 		break;
4190 	case SCTP_NOTIFY_NO_PEER_AUTH:
4191 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
4192 		    (uint16_t)(uintptr_t)data,
4193 		    so_locked);
4194 		break;
4195 	case SCTP_NOTIFY_SENDER_DRY:
4196 		sctp_notify_sender_dry_event(stcb, so_locked);
4197 		break;
4198 	case SCTP_NOTIFY_REMOTE_ERROR:
4199 		sctp_notify_remote_error(stcb, error, data);
4200 		break;
4201 	default:
4202 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
4203 		    __func__, notification, notification);
4204 		break;
4205 	}			/* end switch */
4206 }
4207 
4208 void
4209 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
4210 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4211     SCTP_UNUSED
4212 #endif
4213 )
4214 {
4215 	struct sctp_association *asoc;
4216 	struct sctp_stream_out *outs;
4217 	struct sctp_tmit_chunk *chk, *nchk;
4218 	struct sctp_stream_queue_pending *sp, *nsp;
4219 	int i;
4220 
4221 	if (stcb == NULL) {
4222 		return;
4223 	}
4224 	asoc = &stcb->asoc;
4225 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
4226 		/* already being freed */
4227 		return;
4228 	}
4229 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4230 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4231 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
4232 		return;
4233 	}
4234 	/* now through all the gunk freeing chunks */
4235 	if (holds_lock == 0) {
4236 		SCTP_TCB_SEND_LOCK(stcb);
4237 	}
4238 	/* sent queue SHOULD be empty */
4239 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
4240 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
4241 		asoc->sent_queue_cnt--;
4242 		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
4243 			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
4244 				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
4245 #ifdef INVARIANTS
4246 			} else {
4247 				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
4248 #endif
4249 			}
4250 		}
4251 		if (chk->data != NULL) {
4252 			sctp_free_bufspace(stcb, asoc, chk, 1);
4253 			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
4254 			    error, chk, so_locked);
4255 			if (chk->data) {
4256 				sctp_m_freem(chk->data);
4257 				chk->data = NULL;
4258 			}
4259 		}
4260 		sctp_free_a_chunk(stcb, chk, so_locked);
4261 		/* sa_ignore FREED_MEMORY */
4262 	}
4263 	/* pending send queue SHOULD be empty */
4264 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
4265 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
4266 		asoc->send_queue_cnt--;
4267 		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
4268 			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
4269 #ifdef INVARIANTS
4270 		} else {
4271 			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
4272 #endif
4273 		}
4274 		if (chk->data != NULL) {
4275 			sctp_free_bufspace(stcb, asoc, chk, 1);
4276 			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
4277 			    error, chk, so_locked);
4278 			if (chk->data) {
4279 				sctp_m_freem(chk->data);
4280 				chk->data = NULL;
4281 			}
4282 		}
4283 		sctp_free_a_chunk(stcb, chk, so_locked);
4284 		/* sa_ignore FREED_MEMORY */
4285 	}
4286 	for (i = 0; i < asoc->streamoutcnt; i++) {
4287 		/* For each stream */
4288 		outs = &asoc->strmout[i];
4289 		/* clean up any sends there */
4290 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
4291 			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
4292 			TAILQ_REMOVE(&outs->outqueue, sp, next);
4293 			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1);
4294 			sctp_free_spbufspace(stcb, asoc, sp);
4295 			if (sp->data) {
4296 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
4297 				    error, (void *)sp, so_locked);
4298 				if (sp->data) {
4299 					sctp_m_freem(sp->data);
4300 					sp->data = NULL;
4301 					sp->tail_mbuf = NULL;
4302 					sp->length = 0;
4303 				}
4304 			}
4305 			if (sp->net) {
4306 				sctp_free_remote_addr(sp->net);
4307 				sp->net = NULL;
4308 			}
4309 			/* Free the chunk */
4310 			sctp_free_a_strmoq(stcb, sp, so_locked);
4311 			/* sa_ignore FREED_MEMORY */
4312 		}
4313 	}
4314 
4315 	if (holds_lock == 0) {
4316 		SCTP_TCB_SEND_UNLOCK(stcb);
4317 	}
4318 }
4319 
4320 void
4321 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
4322     struct sctp_abort_chunk *abort, int so_locked
4323 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4324     SCTP_UNUSED
4325 #endif
4326 )
4327 {
4328 	if (stcb == NULL) {
4329 		return;
4330 	}
4331 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
4332 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4333 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
4334 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
4335 	}
4336 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4337 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4338 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
4339 		return;
4340 	}
4341 	/* Tell them we lost the asoc */
4342 	sctp_report_all_outbound(stcb, error, 0, so_locked);
4343 	if (from_peer) {
4344 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
4345 	} else {
4346 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
4347 	}
4348 }
4349 
4350 void
4351 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4352     struct mbuf *m, int iphlen,
4353     struct sockaddr *src, struct sockaddr *dst,
4354     struct sctphdr *sh, struct mbuf *op_err,
4355     uint8_t mflowtype, uint32_t mflowid,
4356     uint32_t vrf_id, uint16_t port)
4357 {
4358 	uint32_t vtag;
4359 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4360 	struct socket *so;
4361 #endif
4362 
4363 	vtag = 0;
4364 	if (stcb != NULL) {
4365 		vtag = stcb->asoc.peer_vtag;
4366 		vrf_id = stcb->asoc.vrf_id;
4367 	}
4368 	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
4369 	    mflowtype, mflowid, inp->fibnum,
4370 	    vrf_id, port);
4371 	if (stcb != NULL) {
4372 		/* We have a TCB to abort, send notification too */
4373 		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
4374 		SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
4375 		/* Ok, now lets free it */
4376 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4377 		so = SCTP_INP_SO(inp);
4378 		atomic_add_int(&stcb->asoc.refcnt, 1);
4379 		SCTP_TCB_UNLOCK(stcb);
4380 		SCTP_SOCKET_LOCK(so, 1);
4381 		SCTP_TCB_LOCK(stcb);
4382 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4383 #endif
4384 		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4385 		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4386 		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4387 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4388 		}
4389 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4390 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
4391 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4392 		SCTP_SOCKET_UNLOCK(so, 1);
4393 #endif
4394 	}
4395 }
4396 #ifdef SCTP_ASOCLOG_OF_TSNS
4397 void
4398 sctp_print_out_track_log(struct sctp_tcb *stcb)
4399 {
4400 #ifdef NOSIY_PRINTS
4401 	int i;
4402 
4403 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
4404 	SCTP_PRINTF("IN bound TSN log-aaa\n");
4405 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
4406 		SCTP_PRINTF("None rcvd\n");
4407 		goto none_in;
4408 	}
4409 	if (stcb->asoc.tsn_in_wrapped) {
4410 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
4411 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4412 			    stcb->asoc.in_tsnlog[i].tsn,
4413 			    stcb->asoc.in_tsnlog[i].strm,
4414 			    stcb->asoc.in_tsnlog[i].seq,
4415 			    stcb->asoc.in_tsnlog[i].flgs,
4416 			    stcb->asoc.in_tsnlog[i].sz);
4417 		}
4418 	}
4419 	if (stcb->asoc.tsn_in_at) {
4420 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4421 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4422 			    stcb->asoc.in_tsnlog[i].tsn,
4423 			    stcb->asoc.in_tsnlog[i].strm,
4424 			    stcb->asoc.in_tsnlog[i].seq,
4425 			    stcb->asoc.in_tsnlog[i].flgs,
4426 			    stcb->asoc.in_tsnlog[i].sz);
4427 		}
4428 	}
4429 none_in:
4430 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
4431 	if ((stcb->asoc.tsn_out_at == 0) &&
4432 	    (stcb->asoc.tsn_out_wrapped == 0)) {
4433 		SCTP_PRINTF("None sent\n");
4434 	}
4435 	if (stcb->asoc.tsn_out_wrapped) {
4436 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4437 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4438 			    stcb->asoc.out_tsnlog[i].tsn,
4439 			    stcb->asoc.out_tsnlog[i].strm,
4440 			    stcb->asoc.out_tsnlog[i].seq,
4441 			    stcb->asoc.out_tsnlog[i].flgs,
4442 			    stcb->asoc.out_tsnlog[i].sz);
4443 		}
4444 	}
4445 	if (stcb->asoc.tsn_out_at) {
4446 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4447 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4448 			    stcb->asoc.out_tsnlog[i].tsn,
4449 			    stcb->asoc.out_tsnlog[i].strm,
4450 			    stcb->asoc.out_tsnlog[i].seq,
4451 			    stcb->asoc.out_tsnlog[i].flgs,
4452 			    stcb->asoc.out_tsnlog[i].sz);
4453 		}
4454 	}
4455 #endif
4456 }
4457 #endif
4458 
4459 void
4460 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4461     struct mbuf *op_err,
4462     int so_locked
4463 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4464     SCTP_UNUSED
4465 #endif
4466 )
4467 {
4468 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4469 	struct socket *so;
4470 #endif
4471 
4472 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4473 	so = SCTP_INP_SO(inp);
4474 #endif
4475 	if (stcb == NULL) {
4476 		/* Got to have a TCB */
4477 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4478 			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4479 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4480 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
4481 			}
4482 		}
4483 		return;
4484 	} else {
4485 		SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
4486 	}
4487 	/* notify the peer */
4488 	sctp_send_abort_tcb(stcb, op_err, so_locked);
4489 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4490 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4491 	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4492 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4493 	}
4494 	/* notify the ulp */
4495 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4496 		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4497 	}
4498 	/* now free the asoc */
4499 #ifdef SCTP_ASOCLOG_OF_TSNS
4500 	sctp_print_out_track_log(stcb);
4501 #endif
4502 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4503 	if (!so_locked) {
4504 		atomic_add_int(&stcb->asoc.refcnt, 1);
4505 		SCTP_TCB_UNLOCK(stcb);
4506 		SCTP_SOCKET_LOCK(so, 1);
4507 		SCTP_TCB_LOCK(stcb);
4508 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4509 	}
4510 #endif
4511 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4512 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4513 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4514 	if (!so_locked) {
4515 		SCTP_SOCKET_UNLOCK(so, 1);
4516 	}
4517 #endif
4518 }
4519 
4520 void
4521 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4522     struct sockaddr *src, struct sockaddr *dst,
4523     struct sctphdr *sh, struct sctp_inpcb *inp,
4524     struct mbuf *cause,
4525     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4526     uint32_t vrf_id, uint16_t port)
4527 {
4528 	struct sctp_chunkhdr *ch, chunk_buf;
4529 	unsigned int chk_length;
4530 	int contains_init_chunk;
4531 
4532 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4533 	/* Generate a TO address for future reference */
4534 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4535 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4536 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4537 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4538 		}
4539 	}
4540 	contains_init_chunk = 0;
4541 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4542 	    sizeof(*ch), (uint8_t *)&chunk_buf);
4543 	while (ch != NULL) {
4544 		chk_length = ntohs(ch->chunk_length);
4545 		if (chk_length < sizeof(*ch)) {
4546 			/* break to abort land */
4547 			break;
4548 		}
4549 		switch (ch->chunk_type) {
4550 		case SCTP_INIT:
4551 			contains_init_chunk = 1;
4552 			break;
4553 		case SCTP_PACKET_DROPPED:
4554 			/* we don't respond to pkt-dropped */
4555 			return;
4556 		case SCTP_ABORT_ASSOCIATION:
4557 			/* we don't respond with an ABORT to an ABORT */
4558 			return;
4559 		case SCTP_SHUTDOWN_COMPLETE:
4560 			/*
4561 			 * we ignore it since we are not waiting for it and
4562 			 * peer is gone
4563 			 */
4564 			return;
4565 		case SCTP_SHUTDOWN_ACK:
4566 			sctp_send_shutdown_complete2(src, dst, sh,
4567 			    mflowtype, mflowid, fibnum,
4568 			    vrf_id, port);
4569 			return;
4570 		default:
4571 			break;
4572 		}
4573 		offset += SCTP_SIZE32(chk_length);
4574 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4575 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4576 	}
4577 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4578 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4579 	    (contains_init_chunk == 0))) {
4580 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4581 		    mflowtype, mflowid, fibnum,
4582 		    vrf_id, port);
4583 	}
4584 }
4585 
4586 /*
4587  * check the inbound datagram to make sure there is not an abort inside it,
4588  * if there is return 1, else return 0.
4589  */
4590 int
4591 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
4592 {
4593 	struct sctp_chunkhdr *ch;
4594 	struct sctp_init_chunk *init_chk, chunk_buf;
4595 	int offset;
4596 	unsigned int chk_length;
4597 
4598 	offset = iphlen + sizeof(struct sctphdr);
4599 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4600 	    (uint8_t *)&chunk_buf);
4601 	while (ch != NULL) {
4602 		chk_length = ntohs(ch->chunk_length);
4603 		if (chk_length < sizeof(*ch)) {
4604 			/* packet is probably corrupt */
4605 			break;
4606 		}
4607 		/* we seem to be ok, is it an abort? */
4608 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4609 			/* yep, tell them */
4610 			return (1);
4611 		}
4612 		if (ch->chunk_type == SCTP_INITIATION) {
4613 			/* need to update the Vtag */
4614 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4615 			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
4616 			if (init_chk != NULL) {
4617 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4618 			}
4619 		}
4620 		/* Nope, move to the next chunk */
4621 		offset += SCTP_SIZE32(chk_length);
4622 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4623 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4624 	}
4625 	return (0);
4626 }
4627 
4628 /*
4629  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4630  * set (i.e. it's 0) so, create this function to compare link local scopes
4631  */
4632 #ifdef INET6
4633 uint32_t
4634 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4635 {
4636 	struct sockaddr_in6 a, b;
4637 
4638 	/* save copies */
4639 	a = *addr1;
4640 	b = *addr2;
4641 
4642 	if (a.sin6_scope_id == 0)
4643 		if (sa6_recoverscope(&a)) {
4644 			/* can't get scope, so can't match */
4645 			return (0);
4646 		}
4647 	if (b.sin6_scope_id == 0)
4648 		if (sa6_recoverscope(&b)) {
4649 			/* can't get scope, so can't match */
4650 			return (0);
4651 		}
4652 	if (a.sin6_scope_id != b.sin6_scope_id)
4653 		return (0);
4654 
4655 	return (1);
4656 }
4657 
4658 /*
4659  * returns a sockaddr_in6 with embedded scope recovered and removed
4660  */
4661 struct sockaddr_in6 *
4662 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4663 {
4664 	/* check and strip embedded scope junk */
4665 	if (addr->sin6_family == AF_INET6) {
4666 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4667 			if (addr->sin6_scope_id == 0) {
4668 				*store = *addr;
4669 				if (!sa6_recoverscope(store)) {
4670 					/* use the recovered scope */
4671 					addr = store;
4672 				}
4673 			} else {
4674 				/* else, return the original "to" addr */
4675 				in6_clearscope(&addr->sin6_addr);
4676 			}
4677 		}
4678 	}
4679 	return (addr);
4680 }
4681 #endif
4682 
4683 /*
4684  * are the two addresses the same?  currently a "scopeless" check returns: 1
4685  * if same, 0 if not
4686  */
4687 int
4688 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4689 {
4690 
4691 	/* must be valid */
4692 	if (sa1 == NULL || sa2 == NULL)
4693 		return (0);
4694 
4695 	/* must be the same family */
4696 	if (sa1->sa_family != sa2->sa_family)
4697 		return (0);
4698 
4699 	switch (sa1->sa_family) {
4700 #ifdef INET6
4701 	case AF_INET6:
4702 		{
4703 			/* IPv6 addresses */
4704 			struct sockaddr_in6 *sin6_1, *sin6_2;
4705 
4706 			sin6_1 = (struct sockaddr_in6 *)sa1;
4707 			sin6_2 = (struct sockaddr_in6 *)sa2;
4708 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4709 			    sin6_2));
4710 		}
4711 #endif
4712 #ifdef INET
4713 	case AF_INET:
4714 		{
4715 			/* IPv4 addresses */
4716 			struct sockaddr_in *sin_1, *sin_2;
4717 
4718 			sin_1 = (struct sockaddr_in *)sa1;
4719 			sin_2 = (struct sockaddr_in *)sa2;
4720 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4721 		}
4722 #endif
4723 	default:
4724 		/* we don't do these... */
4725 		return (0);
4726 	}
4727 }
4728 
4729 void
4730 sctp_print_address(struct sockaddr *sa)
4731 {
4732 #ifdef INET6
4733 	char ip6buf[INET6_ADDRSTRLEN];
4734 #endif
4735 
4736 	switch (sa->sa_family) {
4737 #ifdef INET6
4738 	case AF_INET6:
4739 		{
4740 			struct sockaddr_in6 *sin6;
4741 
4742 			sin6 = (struct sockaddr_in6 *)sa;
4743 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4744 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4745 			    ntohs(sin6->sin6_port),
4746 			    sin6->sin6_scope_id);
4747 			break;
4748 		}
4749 #endif
4750 #ifdef INET
4751 	case AF_INET:
4752 		{
4753 			struct sockaddr_in *sin;
4754 			unsigned char *p;
4755 
4756 			sin = (struct sockaddr_in *)sa;
4757 			p = (unsigned char *)&sin->sin_addr;
4758 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4759 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4760 			break;
4761 		}
4762 #endif
4763 	default:
4764 		SCTP_PRINTF("?\n");
4765 		break;
4766 	}
4767 }
4768 
4769 void
4770 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4771     struct sctp_inpcb *new_inp,
4772     struct sctp_tcb *stcb,
4773     int waitflags)
4774 {
4775 	/*
4776 	 * go through our old INP and pull off any control structures that
4777 	 * belong to stcb and move then to the new inp.
4778 	 */
4779 	struct socket *old_so, *new_so;
4780 	struct sctp_queued_to_read *control, *nctl;
4781 	struct sctp_readhead tmp_queue;
4782 	struct mbuf *m;
4783 	int error = 0;
4784 
4785 	old_so = old_inp->sctp_socket;
4786 	new_so = new_inp->sctp_socket;
4787 	TAILQ_INIT(&tmp_queue);
4788 	error = sblock(&old_so->so_rcv, waitflags);
4789 	if (error) {
4790 		/*
4791 		 * Gak, can't get sblock, we have a problem. data will be
4792 		 * left stranded.. and we don't dare look at it since the
4793 		 * other thread may be reading something. Oh well, its a
4794 		 * screwed up app that does a peeloff OR a accept while
4795 		 * reading from the main socket... actually its only the
4796 		 * peeloff() case, since I think read will fail on a
4797 		 * listening socket..
4798 		 */
4799 		return;
4800 	}
4801 	/* lock the socket buffers */
4802 	SCTP_INP_READ_LOCK(old_inp);
4803 	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4804 		/* Pull off all for out target stcb */
4805 		if (control->stcb == stcb) {
4806 			/* remove it we want it */
4807 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4808 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4809 			m = control->data;
4810 			while (m) {
4811 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4812 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4813 				}
4814 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4815 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4816 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4817 				}
4818 				m = SCTP_BUF_NEXT(m);
4819 			}
4820 		}
4821 	}
4822 	SCTP_INP_READ_UNLOCK(old_inp);
4823 	/* Remove the sb-lock on the old socket */
4824 
4825 	sbunlock(&old_so->so_rcv);
4826 	/* Now we move them over to the new socket buffer */
4827 	SCTP_INP_READ_LOCK(new_inp);
4828 	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4829 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4830 		m = control->data;
4831 		while (m) {
4832 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4833 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4834 			}
4835 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4836 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4837 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4838 			}
4839 			m = SCTP_BUF_NEXT(m);
4840 		}
4841 	}
4842 	SCTP_INP_READ_UNLOCK(new_inp);
4843 }
4844 
4845 void
4846 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
4847     struct sctp_tcb *stcb,
4848     int so_locked
4849 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4850     SCTP_UNUSED
4851 #endif
4852 )
4853 {
4854 	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
4855 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4856 		struct socket *so;
4857 
4858 		so = SCTP_INP_SO(inp);
4859 		if (!so_locked) {
4860 			if (stcb) {
4861 				atomic_add_int(&stcb->asoc.refcnt, 1);
4862 				SCTP_TCB_UNLOCK(stcb);
4863 			}
4864 			SCTP_SOCKET_LOCK(so, 1);
4865 			if (stcb) {
4866 				SCTP_TCB_LOCK(stcb);
4867 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4868 			}
4869 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4870 				SCTP_SOCKET_UNLOCK(so, 1);
4871 				return;
4872 			}
4873 		}
4874 #endif
4875 		sctp_sorwakeup(inp, inp->sctp_socket);
4876 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4877 		if (!so_locked) {
4878 			SCTP_SOCKET_UNLOCK(so, 1);
4879 		}
4880 #endif
4881 	}
4882 }
4883 
4884 void
4885 sctp_add_to_readq(struct sctp_inpcb *inp,
4886     struct sctp_tcb *stcb,
4887     struct sctp_queued_to_read *control,
4888     struct sockbuf *sb,
4889     int end,
4890     int inp_read_lock_held,
4891     int so_locked
4892 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4893     SCTP_UNUSED
4894 #endif
4895 )
4896 {
4897 	/*
4898 	 * Here we must place the control on the end of the socket read
4899 	 * queue AND increment sb_cc so that select will work properly on
4900 	 * read.
4901 	 */
4902 	struct mbuf *m, *prev = NULL;
4903 
4904 	if (inp == NULL) {
4905 		/* Gak, TSNH!! */
4906 #ifdef INVARIANTS
4907 		panic("Gak, inp NULL on add_to_readq");
4908 #endif
4909 		return;
4910 	}
4911 	if (inp_read_lock_held == 0)
4912 		SCTP_INP_READ_LOCK(inp);
4913 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4914 		if (!control->on_strm_q) {
4915 			sctp_free_remote_addr(control->whoFrom);
4916 			if (control->data) {
4917 				sctp_m_freem(control->data);
4918 				control->data = NULL;
4919 			}
4920 			sctp_free_a_readq(stcb, control);
4921 		}
4922 		if (inp_read_lock_held == 0)
4923 			SCTP_INP_READ_UNLOCK(inp);
4924 		return;
4925 	}
4926 	if (!(control->spec_flags & M_NOTIFICATION)) {
4927 		atomic_add_int(&inp->total_recvs, 1);
4928 		if (!control->do_not_ref_stcb) {
4929 			atomic_add_int(&stcb->total_recvs, 1);
4930 		}
4931 	}
4932 	m = control->data;
4933 	control->held_length = 0;
4934 	control->length = 0;
4935 	while (m) {
4936 		if (SCTP_BUF_LEN(m) == 0) {
4937 			/* Skip mbufs with NO length */
4938 			if (prev == NULL) {
4939 				/* First one */
4940 				control->data = sctp_m_free(m);
4941 				m = control->data;
4942 			} else {
4943 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4944 				m = SCTP_BUF_NEXT(prev);
4945 			}
4946 			if (m == NULL) {
4947 				control->tail_mbuf = prev;
4948 			}
4949 			continue;
4950 		}
4951 		prev = m;
4952 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4953 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4954 		}
4955 		sctp_sballoc(stcb, sb, m);
4956 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4957 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4958 		}
4959 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4960 		m = SCTP_BUF_NEXT(m);
4961 	}
4962 	if (prev != NULL) {
4963 		control->tail_mbuf = prev;
4964 	} else {
4965 		/* Everything got collapsed out?? */
4966 		if (!control->on_strm_q) {
4967 			sctp_free_remote_addr(control->whoFrom);
4968 			sctp_free_a_readq(stcb, control);
4969 		}
4970 		if (inp_read_lock_held == 0)
4971 			SCTP_INP_READ_UNLOCK(inp);
4972 		return;
4973 	}
4974 	if (end) {
4975 		control->end_added = 1;
4976 	}
4977 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4978 	control->on_read_q = 1;
4979 	if (inp_read_lock_held == 0)
4980 		SCTP_INP_READ_UNLOCK(inp);
4981 	if (inp && inp->sctp_socket) {
4982 		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
4983 	}
4984 }
4985 
4986 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4987  *************ALTERNATE ROUTING CODE
4988  */
4989 
4990 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4991  *************ALTERNATE ROUTING CODE
4992  */
4993 
4994 struct mbuf *
4995 sctp_generate_cause(uint16_t code, char *info)
4996 {
4997 	struct mbuf *m;
4998 	struct sctp_gen_error_cause *cause;
4999 	size_t info_len;
5000 	uint16_t len;
5001 
5002 	if ((code == 0) || (info == NULL)) {
5003 		return (NULL);
5004 	}
5005 	info_len = strlen(info);
5006 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
5007 		return (NULL);
5008 	}
5009 	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
5010 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
5011 	if (m != NULL) {
5012 		SCTP_BUF_LEN(m) = len;
5013 		cause = mtod(m, struct sctp_gen_error_cause *);
5014 		cause->code = htons(code);
5015 		cause->length = htons(len);
5016 		memcpy(cause->info, info, info_len);
5017 	}
5018 	return (m);
5019 }
5020 
5021 struct mbuf *
5022 sctp_generate_no_user_data_cause(uint32_t tsn)
5023 {
5024 	struct mbuf *m;
5025 	struct sctp_error_no_user_data *no_user_data_cause;
5026 	uint16_t len;
5027 
5028 	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
5029 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
5030 	if (m != NULL) {
5031 		SCTP_BUF_LEN(m) = len;
5032 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
5033 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
5034 		no_user_data_cause->cause.length = htons(len);
5035 		no_user_data_cause->tsn = htonl(tsn);
5036 	}
5037 	return (m);
5038 }
5039 
5040 #ifdef SCTP_MBCNT_LOGGING
5041 void
5042 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
5043     struct sctp_tmit_chunk *tp1, int chk_cnt)
5044 {
5045 	if (tp1->data == NULL) {
5046 		return;
5047 	}
5048 	asoc->chunks_on_out_queue -= chk_cnt;
5049 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
5050 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
5051 		    asoc->total_output_queue_size,
5052 		    tp1->book_size,
5053 		    0,
5054 		    tp1->mbcnt);
5055 	}
5056 	if (asoc->total_output_queue_size >= tp1->book_size) {
5057 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
5058 	} else {
5059 		asoc->total_output_queue_size = 0;
5060 	}
5061 
5062 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
5063 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
5064 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
5065 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
5066 		} else {
5067 			stcb->sctp_socket->so_snd.sb_cc = 0;
5068 
5069 		}
5070 	}
5071 }
5072 
5073 #endif
5074 
5075 int
5076 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
5077     uint8_t sent, int so_locked
5078 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
5079     SCTP_UNUSED
5080 #endif
5081 )
5082 {
5083 	struct sctp_stream_out *strq;
5084 	struct sctp_tmit_chunk *chk = NULL, *tp2;
5085 	struct sctp_stream_queue_pending *sp;
5086 	uint32_t mid;
5087 	uint16_t sid;
5088 	uint8_t foundeom = 0;
5089 	int ret_sz = 0;
5090 	int notdone;
5091 	int do_wakeup_routine = 0;
5092 
5093 	sid = tp1->rec.data.sid;
5094 	mid = tp1->rec.data.mid;
5095 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
5096 		stcb->asoc.abandoned_sent[0]++;
5097 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
5098 		stcb->asoc.strmout[sid].abandoned_sent[0]++;
5099 #if defined(SCTP_DETAILED_STR_STATS)
5100 		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
5101 #endif
5102 	} else {
5103 		stcb->asoc.abandoned_unsent[0]++;
5104 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
5105 		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
5106 #if defined(SCTP_DETAILED_STR_STATS)
5107 		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
5108 #endif
5109 	}
5110 	do {
5111 		ret_sz += tp1->book_size;
5112 		if (tp1->data != NULL) {
5113 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5114 				sctp_flight_size_decrease(tp1);
5115 				sctp_total_flight_decrease(stcb, tp1);
5116 			}
5117 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
5118 			stcb->asoc.peers_rwnd += tp1->send_size;
5119 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
5120 			if (sent) {
5121 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
5122 			} else {
5123 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
5124 			}
5125 			if (tp1->data) {
5126 				sctp_m_freem(tp1->data);
5127 				tp1->data = NULL;
5128 			}
5129 			do_wakeup_routine = 1;
5130 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
5131 				stcb->asoc.sent_queue_cnt_removeable--;
5132 			}
5133 		}
5134 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
5135 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
5136 		    SCTP_DATA_NOT_FRAG) {
5137 			/* not frag'ed we ae done   */
5138 			notdone = 0;
5139 			foundeom = 1;
5140 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
5141 			/* end of frag, we are done */
5142 			notdone = 0;
5143 			foundeom = 1;
5144 		} else {
5145 			/*
5146 			 * Its a begin or middle piece, we must mark all of
5147 			 * it
5148 			 */
5149 			notdone = 1;
5150 			tp1 = TAILQ_NEXT(tp1, sctp_next);
5151 		}
5152 	} while (tp1 && notdone);
5153 	if (foundeom == 0) {
5154 		/*
5155 		 * The multi-part message was scattered across the send and
5156 		 * sent queue.
5157 		 */
5158 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
5159 			if ((tp1->rec.data.sid != sid) ||
5160 			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
5161 				break;
5162 			}
5163 			/*
5164 			 * save to chk in case we have some on stream out
5165 			 * queue. If so and we have an un-transmitted one we
5166 			 * don't have to fudge the TSN.
5167 			 */
5168 			chk = tp1;
5169 			ret_sz += tp1->book_size;
5170 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
5171 			if (sent) {
5172 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
5173 			} else {
5174 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
5175 			}
5176 			if (tp1->data) {
5177 				sctp_m_freem(tp1->data);
5178 				tp1->data = NULL;
5179 			}
5180 			/* No flight involved here book the size to 0 */
5181 			tp1->book_size = 0;
5182 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
5183 				foundeom = 1;
5184 			}
5185 			do_wakeup_routine = 1;
5186 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
5187 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
5188 			/*
5189 			 * on to the sent queue so we can wait for it to be
5190 			 * passed by.
5191 			 */
5192 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
5193 			    sctp_next);
5194 			stcb->asoc.send_queue_cnt--;
5195 			stcb->asoc.sent_queue_cnt++;
5196 		}
5197 	}
5198 	if (foundeom == 0) {
5199 		/*
5200 		 * Still no eom found. That means there is stuff left on the
5201 		 * stream out queue.. yuck.
5202 		 */
5203 		SCTP_TCB_SEND_LOCK(stcb);
5204 		strq = &stcb->asoc.strmout[sid];
5205 		sp = TAILQ_FIRST(&strq->outqueue);
5206 		if (sp != NULL) {
5207 			sp->discard_rest = 1;
5208 			/*
5209 			 * We may need to put a chunk on the queue that
5210 			 * holds the TSN that would have been sent with the
5211 			 * LAST bit.
5212 			 */
5213 			if (chk == NULL) {
5214 				/* Yep, we have to */
5215 				sctp_alloc_a_chunk(stcb, chk);
5216 				if (chk == NULL) {
5217 					/*
5218 					 * we are hosed. All we can do is
5219 					 * nothing.. which will cause an
5220 					 * abort if the peer is paying
5221 					 * attention.
5222 					 */
5223 					goto oh_well;
5224 				}
5225 				memset(chk, 0, sizeof(*chk));
5226 				chk->rec.data.rcv_flags = 0;
5227 				chk->sent = SCTP_FORWARD_TSN_SKIP;
5228 				chk->asoc = &stcb->asoc;
5229 				if (stcb->asoc.idata_supported == 0) {
5230 					if (sp->sinfo_flags & SCTP_UNORDERED) {
5231 						chk->rec.data.mid = 0;
5232 					} else {
5233 						chk->rec.data.mid = strq->next_mid_ordered;
5234 					}
5235 				} else {
5236 					if (sp->sinfo_flags & SCTP_UNORDERED) {
5237 						chk->rec.data.mid = strq->next_mid_unordered;
5238 					} else {
5239 						chk->rec.data.mid = strq->next_mid_ordered;
5240 					}
5241 				}
5242 				chk->rec.data.sid = sp->sid;
5243 				chk->rec.data.ppid = sp->ppid;
5244 				chk->rec.data.context = sp->context;
5245 				chk->flags = sp->act_flags;
5246 				chk->whoTo = NULL;
5247 				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
5248 				strq->chunks_on_queues++;
5249 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
5250 				stcb->asoc.sent_queue_cnt++;
5251 				stcb->asoc.pr_sctp_cnt++;
5252 			}
5253 			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
5254 			if (sp->sinfo_flags & SCTP_UNORDERED) {
5255 				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
5256 			}
5257 			if (stcb->asoc.idata_supported == 0) {
5258 				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
5259 					strq->next_mid_ordered++;
5260 				}
5261 			} else {
5262 				if (sp->sinfo_flags & SCTP_UNORDERED) {
5263 					strq->next_mid_unordered++;
5264 				} else {
5265 					strq->next_mid_ordered++;
5266 				}
5267 			}
5268 	oh_well:
5269 			if (sp->data) {
5270 				/*
5271 				 * Pull any data to free up the SB and allow
5272 				 * sender to "add more" while we will throw
5273 				 * away :-)
5274 				 */
5275 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
5276 				ret_sz += sp->length;
5277 				do_wakeup_routine = 1;
5278 				sp->some_taken = 1;
5279 				sctp_m_freem(sp->data);
5280 				sp->data = NULL;
5281 				sp->tail_mbuf = NULL;
5282 				sp->length = 0;
5283 			}
5284 		}
5285 		SCTP_TCB_SEND_UNLOCK(stcb);
5286 	}
5287 	if (do_wakeup_routine) {
5288 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5289 		struct socket *so;
5290 
5291 		so = SCTP_INP_SO(stcb->sctp_ep);
5292 		if (!so_locked) {
5293 			atomic_add_int(&stcb->asoc.refcnt, 1);
5294 			SCTP_TCB_UNLOCK(stcb);
5295 			SCTP_SOCKET_LOCK(so, 1);
5296 			SCTP_TCB_LOCK(stcb);
5297 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
5298 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
5299 				/* assoc was freed while we were unlocked */
5300 				SCTP_SOCKET_UNLOCK(so, 1);
5301 				return (ret_sz);
5302 			}
5303 		}
5304 #endif
5305 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
5306 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5307 		if (!so_locked) {
5308 			SCTP_SOCKET_UNLOCK(so, 1);
5309 		}
5310 #endif
5311 	}
5312 	return (ret_sz);
5313 }
5314 
5315 /*
5316  * checks to see if the given address, sa, is one that is currently known by
5317  * the kernel note: can't distinguish the same address on multiple interfaces
5318  * and doesn't handle multiple addresses with different zone/scope id's note:
5319  * ifa_ifwithaddr() compares the entire sockaddr struct
5320  */
5321 struct sctp_ifa *
5322 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
5323     int holds_lock)
5324 {
5325 	struct sctp_laddr *laddr;
5326 
5327 	if (holds_lock == 0) {
5328 		SCTP_INP_RLOCK(inp);
5329 	}
5330 
5331 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
5332 		if (laddr->ifa == NULL)
5333 			continue;
5334 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
5335 			continue;
5336 #ifdef INET
5337 		if (addr->sa_family == AF_INET) {
5338 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5339 			    laddr->ifa->address.sin.sin_addr.s_addr) {
5340 				/* found him. */
5341 				if (holds_lock == 0) {
5342 					SCTP_INP_RUNLOCK(inp);
5343 				}
5344 				return (laddr->ifa);
5345 				break;
5346 			}
5347 		}
5348 #endif
5349 #ifdef INET6
5350 		if (addr->sa_family == AF_INET6) {
5351 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5352 			    &laddr->ifa->address.sin6)) {
5353 				/* found him. */
5354 				if (holds_lock == 0) {
5355 					SCTP_INP_RUNLOCK(inp);
5356 				}
5357 				return (laddr->ifa);
5358 				break;
5359 			}
5360 		}
5361 #endif
5362 	}
5363 	if (holds_lock == 0) {
5364 		SCTP_INP_RUNLOCK(inp);
5365 	}
5366 	return (NULL);
5367 }
5368 
5369 uint32_t
5370 sctp_get_ifa_hash_val(struct sockaddr *addr)
5371 {
5372 	switch (addr->sa_family) {
5373 #ifdef INET
5374 	case AF_INET:
5375 		{
5376 			struct sockaddr_in *sin;
5377 
5378 			sin = (struct sockaddr_in *)addr;
5379 			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
5380 		}
5381 #endif
5382 #ifdef INET6
5383 	case AF_INET6:
5384 		{
5385 			struct sockaddr_in6 *sin6;
5386 			uint32_t hash_of_addr;
5387 
5388 			sin6 = (struct sockaddr_in6 *)addr;
5389 			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
5390 			    sin6->sin6_addr.s6_addr32[1] +
5391 			    sin6->sin6_addr.s6_addr32[2] +
5392 			    sin6->sin6_addr.s6_addr32[3]);
5393 			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
5394 			return (hash_of_addr);
5395 		}
5396 #endif
5397 	default:
5398 		break;
5399 	}
5400 	return (0);
5401 }
5402 
5403 struct sctp_ifa *
5404 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5405 {
5406 	struct sctp_ifa *sctp_ifap;
5407 	struct sctp_vrf *vrf;
5408 	struct sctp_ifalist *hash_head;
5409 	uint32_t hash_of_addr;
5410 
5411 	if (holds_lock == 0)
5412 		SCTP_IPI_ADDR_RLOCK();
5413 
5414 	vrf = sctp_find_vrf(vrf_id);
5415 	if (vrf == NULL) {
5416 		if (holds_lock == 0)
5417 			SCTP_IPI_ADDR_RUNLOCK();
5418 		return (NULL);
5419 	}
5420 
5421 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5422 
5423 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5424 	if (hash_head == NULL) {
5425 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5426 		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5427 		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5428 		sctp_print_address(addr);
5429 		SCTP_PRINTF("No such bucket for address\n");
5430 		if (holds_lock == 0)
5431 			SCTP_IPI_ADDR_RUNLOCK();
5432 
5433 		return (NULL);
5434 	}
5435 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5436 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5437 			continue;
5438 #ifdef INET
5439 		if (addr->sa_family == AF_INET) {
5440 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5441 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5442 				/* found him. */
5443 				if (holds_lock == 0)
5444 					SCTP_IPI_ADDR_RUNLOCK();
5445 				return (sctp_ifap);
5446 				break;
5447 			}
5448 		}
5449 #endif
5450 #ifdef INET6
5451 		if (addr->sa_family == AF_INET6) {
5452 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5453 			    &sctp_ifap->address.sin6)) {
5454 				/* found him. */
5455 				if (holds_lock == 0)
5456 					SCTP_IPI_ADDR_RUNLOCK();
5457 				return (sctp_ifap);
5458 				break;
5459 			}
5460 		}
5461 #endif
5462 	}
5463 	if (holds_lock == 0)
5464 		SCTP_IPI_ADDR_RUNLOCK();
5465 	return (NULL);
5466 }
5467 
5468 static void
5469 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
5470     uint32_t rwnd_req)
5471 {
5472 	/* User pulled some data, do we need a rwnd update? */
5473 	struct epoch_tracker et;
5474 	int r_unlocked = 0;
5475 	uint32_t dif, rwnd;
5476 	struct socket *so = NULL;
5477 
5478 	if (stcb == NULL)
5479 		return;
5480 
5481 	atomic_add_int(&stcb->asoc.refcnt, 1);
5482 
5483 	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
5484 	    (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) {
5485 		/* Pre-check If we are freeing no update */
5486 		goto no_lock;
5487 	}
5488 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5489 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5490 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5491 		goto out;
5492 	}
5493 	so = stcb->sctp_socket;
5494 	if (so == NULL) {
5495 		goto out;
5496 	}
5497 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5498 	/* Have you have freed enough to look */
5499 	*freed_so_far = 0;
5500 	/* Yep, its worth a look and the lock overhead */
5501 
5502 	/* Figure out what the rwnd would be */
5503 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5504 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5505 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5506 	} else {
5507 		dif = 0;
5508 	}
5509 	if (dif >= rwnd_req) {
5510 		if (hold_rlock) {
5511 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5512 			r_unlocked = 1;
5513 		}
5514 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5515 			/*
5516 			 * One last check before we allow the guy possibly
5517 			 * to get in. There is a race, where the guy has not
5518 			 * reached the gate. In that case
5519 			 */
5520 			goto out;
5521 		}
5522 		SCTP_TCB_LOCK(stcb);
5523 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5524 			/* No reports here */
5525 			SCTP_TCB_UNLOCK(stcb);
5526 			goto out;
5527 		}
5528 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5529 		NET_EPOCH_ENTER(et);
5530 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5531 
5532 		sctp_chunk_output(stcb->sctp_ep, stcb,
5533 		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5534 		/* make sure no timer is running */
5535 		NET_EPOCH_EXIT(et);
5536 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
5537 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5538 		SCTP_TCB_UNLOCK(stcb);
5539 	} else {
5540 		/* Update how much we have pending */
5541 		stcb->freed_by_sorcv_sincelast = dif;
5542 	}
5543 out:
5544 	if (so && r_unlocked && hold_rlock) {
5545 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5546 	}
5547 
5548 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5549 no_lock:
5550 	atomic_add_int(&stcb->asoc.refcnt, -1);
5551 	return;
5552 }
5553 
5554 int
5555 sctp_sorecvmsg(struct socket *so,
5556     struct uio *uio,
5557     struct mbuf **mp,
5558     struct sockaddr *from,
5559     int fromlen,
5560     int *msg_flags,
5561     struct sctp_sndrcvinfo *sinfo,
5562     int filling_sinfo)
5563 {
5564 	/*
5565 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5566 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5567 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5568 	 * On the way out we may send out any combination of:
5569 	 * MSG_NOTIFICATION MSG_EOR
5570 	 *
5571 	 */
5572 	struct sctp_inpcb *inp = NULL;
5573 	ssize_t my_len = 0;
5574 	ssize_t cp_len = 0;
5575 	int error = 0;
5576 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5577 	struct mbuf *m = NULL;
5578 	struct sctp_tcb *stcb = NULL;
5579 	int wakeup_read_socket = 0;
5580 	int freecnt_applied = 0;
5581 	int out_flags = 0, in_flags = 0;
5582 	int block_allowed = 1;
5583 	uint32_t freed_so_far = 0;
5584 	ssize_t copied_so_far = 0;
5585 	int in_eeor_mode = 0;
5586 	int no_rcv_needed = 0;
5587 	uint32_t rwnd_req = 0;
5588 	int hold_sblock = 0;
5589 	int hold_rlock = 0;
5590 	ssize_t slen = 0;
5591 	uint32_t held_length = 0;
5592 	int sockbuf_lock = 0;
5593 
5594 	if (uio == NULL) {
5595 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5596 		return (EINVAL);
5597 	}
5598 
5599 	if (msg_flags) {
5600 		in_flags = *msg_flags;
5601 		if (in_flags & MSG_PEEK)
5602 			SCTP_STAT_INCR(sctps_read_peeks);
5603 	} else {
5604 		in_flags = 0;
5605 	}
5606 	slen = uio->uio_resid;
5607 
5608 	/* Pull in and set up our int flags */
5609 	if (in_flags & MSG_OOB) {
5610 		/* Out of band's NOT supported */
5611 		return (EOPNOTSUPP);
5612 	}
5613 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5614 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5615 		return (EINVAL);
5616 	}
5617 	if ((in_flags & (MSG_DONTWAIT
5618 	    | MSG_NBIO
5619 	    )) ||
5620 	    SCTP_SO_IS_NBIO(so)) {
5621 		block_allowed = 0;
5622 	}
5623 	/* setup the endpoint */
5624 	inp = (struct sctp_inpcb *)so->so_pcb;
5625 	if (inp == NULL) {
5626 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5627 		return (EFAULT);
5628 	}
5629 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5630 	/* Must be at least a MTU's worth */
5631 	if (rwnd_req < SCTP_MIN_RWND)
5632 		rwnd_req = SCTP_MIN_RWND;
5633 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5634 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5635 		sctp_misc_ints(SCTP_SORECV_ENTER,
5636 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5637 	}
5638 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5639 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5640 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5641 	}
5642 
5643 
5644 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5645 	if (error) {
5646 		goto release_unlocked;
5647 	}
5648 	sockbuf_lock = 1;
5649 restart:
5650 
5651 
5652 restart_nosblocks:
5653 	if (hold_sblock == 0) {
5654 		SOCKBUF_LOCK(&so->so_rcv);
5655 		hold_sblock = 1;
5656 	}
5657 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5658 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5659 		goto out;
5660 	}
5661 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5662 		if (so->so_error) {
5663 			error = so->so_error;
5664 			if ((in_flags & MSG_PEEK) == 0)
5665 				so->so_error = 0;
5666 			goto out;
5667 		} else {
5668 			if (so->so_rcv.sb_cc == 0) {
5669 				/* indicate EOF */
5670 				error = 0;
5671 				goto out;
5672 			}
5673 		}
5674 	}
5675 	if (so->so_rcv.sb_cc <= held_length) {
5676 		if (so->so_error) {
5677 			error = so->so_error;
5678 			if ((in_flags & MSG_PEEK) == 0) {
5679 				so->so_error = 0;
5680 			}
5681 			goto out;
5682 		}
5683 		if ((so->so_rcv.sb_cc == 0) &&
5684 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5685 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5686 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5687 				/*
5688 				 * For active open side clear flags for
5689 				 * re-use passive open is blocked by
5690 				 * connect.
5691 				 */
5692 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5693 					/*
5694 					 * You were aborted, passive side
5695 					 * always hits here
5696 					 */
5697 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5698 					error = ECONNRESET;
5699 				}
5700 				so->so_state &= ~(SS_ISCONNECTING |
5701 				    SS_ISDISCONNECTING |
5702 				    SS_ISCONFIRMING |
5703 				    SS_ISCONNECTED);
5704 				if (error == 0) {
5705 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5706 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5707 						error = ENOTCONN;
5708 					}
5709 				}
5710 				goto out;
5711 			}
5712 		}
5713 		if (block_allowed) {
5714 			error = sbwait(&so->so_rcv);
5715 			if (error) {
5716 				goto out;
5717 			}
5718 			held_length = 0;
5719 			goto restart_nosblocks;
5720 		} else {
5721 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5722 			error = EWOULDBLOCK;
5723 			goto out;
5724 		}
5725 	}
5726 	if (hold_sblock == 1) {
5727 		SOCKBUF_UNLOCK(&so->so_rcv);
5728 		hold_sblock = 0;
5729 	}
5730 	/* we possibly have data we can read */
5731 	/* sa_ignore FREED_MEMORY */
5732 	control = TAILQ_FIRST(&inp->read_queue);
5733 	if (control == NULL) {
5734 		/*
5735 		 * This could be happening since the appender did the
5736 		 * increment but as not yet did the tailq insert onto the
5737 		 * read_queue
5738 		 */
5739 		if (hold_rlock == 0) {
5740 			SCTP_INP_READ_LOCK(inp);
5741 		}
5742 		control = TAILQ_FIRST(&inp->read_queue);
5743 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5744 #ifdef INVARIANTS
5745 			panic("Huh, its non zero and nothing on control?");
5746 #endif
5747 			so->so_rcv.sb_cc = 0;
5748 		}
5749 		SCTP_INP_READ_UNLOCK(inp);
5750 		hold_rlock = 0;
5751 		goto restart;
5752 	}
5753 
5754 	if ((control->length == 0) &&
5755 	    (control->do_not_ref_stcb)) {
5756 		/*
5757 		 * Clean up code for freeing assoc that left behind a
5758 		 * pdapi.. maybe a peer in EEOR that just closed after
5759 		 * sending and never indicated a EOR.
5760 		 */
5761 		if (hold_rlock == 0) {
5762 			hold_rlock = 1;
5763 			SCTP_INP_READ_LOCK(inp);
5764 		}
5765 		control->held_length = 0;
5766 		if (control->data) {
5767 			/* Hmm there is data here .. fix */
5768 			struct mbuf *m_tmp;
5769 			int cnt = 0;
5770 
5771 			m_tmp = control->data;
5772 			while (m_tmp) {
5773 				cnt += SCTP_BUF_LEN(m_tmp);
5774 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5775 					control->tail_mbuf = m_tmp;
5776 					control->end_added = 1;
5777 				}
5778 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5779 			}
5780 			control->length = cnt;
5781 		} else {
5782 			/* remove it */
5783 			TAILQ_REMOVE(&inp->read_queue, control, next);
5784 			/* Add back any hiddend data */
5785 			sctp_free_remote_addr(control->whoFrom);
5786 			sctp_free_a_readq(stcb, control);
5787 		}
5788 		if (hold_rlock) {
5789 			hold_rlock = 0;
5790 			SCTP_INP_READ_UNLOCK(inp);
5791 		}
5792 		goto restart;
5793 	}
5794 	if ((control->length == 0) &&
5795 	    (control->end_added == 1)) {
5796 		/*
5797 		 * Do we also need to check for (control->pdapi_aborted ==
5798 		 * 1)?
5799 		 */
5800 		if (hold_rlock == 0) {
5801 			hold_rlock = 1;
5802 			SCTP_INP_READ_LOCK(inp);
5803 		}
5804 		TAILQ_REMOVE(&inp->read_queue, control, next);
5805 		if (control->data) {
5806 #ifdef INVARIANTS
5807 			panic("control->data not null but control->length == 0");
5808 #else
5809 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5810 			sctp_m_freem(control->data);
5811 			control->data = NULL;
5812 #endif
5813 		}
5814 		if (control->aux_data) {
5815 			sctp_m_free(control->aux_data);
5816 			control->aux_data = NULL;
5817 		}
5818 #ifdef INVARIANTS
5819 		if (control->on_strm_q) {
5820 			panic("About to free ctl:%p so:%p and its in %d",
5821 			    control, so, control->on_strm_q);
5822 		}
5823 #endif
5824 		sctp_free_remote_addr(control->whoFrom);
5825 		sctp_free_a_readq(stcb, control);
5826 		if (hold_rlock) {
5827 			hold_rlock = 0;
5828 			SCTP_INP_READ_UNLOCK(inp);
5829 		}
5830 		goto restart;
5831 	}
5832 	if (control->length == 0) {
5833 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5834 		    (filling_sinfo)) {
5835 			/* find a more suitable one then this */
5836 			ctl = TAILQ_NEXT(control, next);
5837 			while (ctl) {
5838 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5839 				    (ctl->some_taken ||
5840 				    (ctl->spec_flags & M_NOTIFICATION) ||
5841 				    ((ctl->do_not_ref_stcb == 0) &&
5842 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5843 				    ) {
5844 					/*-
5845 					 * If we have a different TCB next, and there is data
5846 					 * present. If we have already taken some (pdapi), OR we can
5847 					 * ref the tcb and no delivery as started on this stream, we
5848 					 * take it. Note we allow a notification on a different
5849 					 * assoc to be delivered..
5850 					 */
5851 					control = ctl;
5852 					goto found_one;
5853 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5854 					    (ctl->length) &&
5855 					    ((ctl->some_taken) ||
5856 					    ((ctl->do_not_ref_stcb == 0) &&
5857 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5858 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5859 					/*-
5860 					 * If we have the same tcb, and there is data present, and we
5861 					 * have the strm interleave feature present. Then if we have
5862 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5863 					 * not started a delivery for this stream, we can take it.
5864 					 * Note we do NOT allow a notificaiton on the same assoc to
5865 					 * be delivered.
5866 					 */
5867 					control = ctl;
5868 					goto found_one;
5869 				}
5870 				ctl = TAILQ_NEXT(ctl, next);
5871 			}
5872 		}
5873 		/*
5874 		 * if we reach here, not suitable replacement is available
5875 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5876 		 * into the our held count, and its time to sleep again.
5877 		 */
5878 		held_length = so->so_rcv.sb_cc;
5879 		control->held_length = so->so_rcv.sb_cc;
5880 		goto restart;
5881 	}
5882 	/* Clear the held length since there is something to read */
5883 	control->held_length = 0;
5884 found_one:
5885 	/*
5886 	 * If we reach here, control has a some data for us to read off.
5887 	 * Note that stcb COULD be NULL.
5888 	 */
5889 	if (hold_rlock == 0) {
5890 		hold_rlock = 1;
5891 		SCTP_INP_READ_LOCK(inp);
5892 	}
5893 	control->some_taken++;
5894 	stcb = control->stcb;
5895 	if (stcb) {
5896 		if ((control->do_not_ref_stcb == 0) &&
5897 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5898 			if (freecnt_applied == 0)
5899 				stcb = NULL;
5900 		} else if (control->do_not_ref_stcb == 0) {
5901 			/* you can't free it on me please */
5902 			/*
5903 			 * The lock on the socket buffer protects us so the
5904 			 * free code will stop. But since we used the
5905 			 * socketbuf lock and the sender uses the tcb_lock
5906 			 * to increment, we need to use the atomic add to
5907 			 * the refcnt
5908 			 */
5909 			if (freecnt_applied) {
5910 #ifdef INVARIANTS
5911 				panic("refcnt already incremented");
5912 #else
5913 				SCTP_PRINTF("refcnt already incremented?\n");
5914 #endif
5915 			} else {
5916 				atomic_add_int(&stcb->asoc.refcnt, 1);
5917 				freecnt_applied = 1;
5918 			}
5919 			/*
5920 			 * Setup to remember how much we have not yet told
5921 			 * the peer our rwnd has opened up. Note we grab the
5922 			 * value from the tcb from last time. Note too that
5923 			 * sack sending clears this when a sack is sent,
5924 			 * which is fine. Once we hit the rwnd_req, we then
5925 			 * will go to the sctp_user_rcvd() that will not
5926 			 * lock until it KNOWs it MUST send a WUP-SACK.
5927 			 */
5928 			freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast;
5929 			stcb->freed_by_sorcv_sincelast = 0;
5930 		}
5931 	}
5932 	if (stcb &&
5933 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5934 	    control->do_not_ref_stcb == 0) {
5935 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5936 	}
5937 
5938 	/* First lets get off the sinfo and sockaddr info */
5939 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5940 		sinfo->sinfo_stream = control->sinfo_stream;
5941 		sinfo->sinfo_ssn = (uint16_t)control->mid;
5942 		sinfo->sinfo_flags = control->sinfo_flags;
5943 		sinfo->sinfo_ppid = control->sinfo_ppid;
5944 		sinfo->sinfo_context = control->sinfo_context;
5945 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5946 		sinfo->sinfo_tsn = control->sinfo_tsn;
5947 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5948 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5949 		nxt = TAILQ_NEXT(control, next);
5950 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5951 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5952 			struct sctp_extrcvinfo *s_extra;
5953 
5954 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5955 			if ((nxt) &&
5956 			    (nxt->length)) {
5957 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5958 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5959 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5960 				}
5961 				if (nxt->spec_flags & M_NOTIFICATION) {
5962 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5963 				}
5964 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5965 				s_extra->serinfo_next_length = nxt->length;
5966 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5967 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5968 				if (nxt->tail_mbuf != NULL) {
5969 					if (nxt->end_added) {
5970 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5971 					}
5972 				}
5973 			} else {
5974 				/*
5975 				 * we explicitly 0 this, since the memcpy
5976 				 * got some other things beyond the older
5977 				 * sinfo_ that is on the control's structure
5978 				 * :-D
5979 				 */
5980 				nxt = NULL;
5981 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5982 				s_extra->serinfo_next_aid = 0;
5983 				s_extra->serinfo_next_length = 0;
5984 				s_extra->serinfo_next_ppid = 0;
5985 				s_extra->serinfo_next_stream = 0;
5986 			}
5987 		}
5988 		/*
5989 		 * update off the real current cum-ack, if we have an stcb.
5990 		 */
5991 		if ((control->do_not_ref_stcb == 0) && stcb)
5992 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5993 		/*
5994 		 * mask off the high bits, we keep the actual chunk bits in
5995 		 * there.
5996 		 */
5997 		sinfo->sinfo_flags &= 0x00ff;
5998 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5999 			sinfo->sinfo_flags |= SCTP_UNORDERED;
6000 		}
6001 	}
6002 #ifdef SCTP_ASOCLOG_OF_TSNS
6003 	{
6004 		int index, newindex;
6005 		struct sctp_pcbtsn_rlog *entry;
6006 
6007 		do {
6008 			index = inp->readlog_index;
6009 			newindex = index + 1;
6010 			if (newindex >= SCTP_READ_LOG_SIZE) {
6011 				newindex = 0;
6012 			}
6013 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
6014 		entry = &inp->readlog[index];
6015 		entry->vtag = control->sinfo_assoc_id;
6016 		entry->strm = control->sinfo_stream;
6017 		entry->seq = (uint16_t)control->mid;
6018 		entry->sz = control->length;
6019 		entry->flgs = control->sinfo_flags;
6020 	}
6021 #endif
6022 	if ((fromlen > 0) && (from != NULL)) {
6023 		union sctp_sockstore store;
6024 		size_t len;
6025 
6026 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
6027 #ifdef INET6
6028 		case AF_INET6:
6029 			len = sizeof(struct sockaddr_in6);
6030 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
6031 			store.sin6.sin6_port = control->port_from;
6032 			break;
6033 #endif
6034 #ifdef INET
6035 		case AF_INET:
6036 #ifdef INET6
6037 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
6038 				len = sizeof(struct sockaddr_in6);
6039 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
6040 				    &store.sin6);
6041 				store.sin6.sin6_port = control->port_from;
6042 			} else {
6043 				len = sizeof(struct sockaddr_in);
6044 				store.sin = control->whoFrom->ro._l_addr.sin;
6045 				store.sin.sin_port = control->port_from;
6046 			}
6047 #else
6048 			len = sizeof(struct sockaddr_in);
6049 			store.sin = control->whoFrom->ro._l_addr.sin;
6050 			store.sin.sin_port = control->port_from;
6051 #endif
6052 			break;
6053 #endif
6054 		default:
6055 			len = 0;
6056 			break;
6057 		}
6058 		memcpy(from, &store, min((size_t)fromlen, len));
6059 #ifdef INET6
6060 		{
6061 			struct sockaddr_in6 lsa6, *from6;
6062 
6063 			from6 = (struct sockaddr_in6 *)from;
6064 			sctp_recover_scope_mac(from6, (&lsa6));
6065 		}
6066 #endif
6067 	}
6068 	if (hold_rlock) {
6069 		SCTP_INP_READ_UNLOCK(inp);
6070 		hold_rlock = 0;
6071 	}
6072 	if (hold_sblock) {
6073 		SOCKBUF_UNLOCK(&so->so_rcv);
6074 		hold_sblock = 0;
6075 	}
6076 	/* now copy out what data we can */
6077 	if (mp == NULL) {
6078 		/* copy out each mbuf in the chain up to length */
6079 get_more_data:
6080 		m = control->data;
6081 		while (m) {
6082 			/* Move out all we can */
6083 			cp_len = uio->uio_resid;
6084 			my_len = SCTP_BUF_LEN(m);
6085 			if (cp_len > my_len) {
6086 				/* not enough in this buf */
6087 				cp_len = my_len;
6088 			}
6089 			if (hold_rlock) {
6090 				SCTP_INP_READ_UNLOCK(inp);
6091 				hold_rlock = 0;
6092 			}
6093 			if (cp_len > 0)
6094 				error = uiomove(mtod(m, char *), (int)cp_len, uio);
6095 			/* re-read */
6096 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
6097 				goto release;
6098 			}
6099 
6100 			if ((control->do_not_ref_stcb == 0) && stcb &&
6101 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
6102 				no_rcv_needed = 1;
6103 			}
6104 			if (error) {
6105 				/* error we are out of here */
6106 				goto release;
6107 			}
6108 			SCTP_INP_READ_LOCK(inp);
6109 			hold_rlock = 1;
6110 			if (cp_len == SCTP_BUF_LEN(m)) {
6111 				if ((SCTP_BUF_NEXT(m) == NULL) &&
6112 				    (control->end_added)) {
6113 					out_flags |= MSG_EOR;
6114 					if ((control->do_not_ref_stcb == 0) &&
6115 					    (control->stcb != NULL) &&
6116 					    ((control->spec_flags & M_NOTIFICATION) == 0))
6117 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6118 				}
6119 				if (control->spec_flags & M_NOTIFICATION) {
6120 					out_flags |= MSG_NOTIFICATION;
6121 				}
6122 				/* we ate up the mbuf */
6123 				if (in_flags & MSG_PEEK) {
6124 					/* just looking */
6125 					m = SCTP_BUF_NEXT(m);
6126 					copied_so_far += cp_len;
6127 				} else {
6128 					/* dispose of the mbuf */
6129 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6130 						sctp_sblog(&so->so_rcv,
6131 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6132 					}
6133 					sctp_sbfree(control, stcb, &so->so_rcv, m);
6134 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6135 						sctp_sblog(&so->so_rcv,
6136 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6137 					}
6138 					copied_so_far += cp_len;
6139 					freed_so_far += (uint32_t)cp_len;
6140 					freed_so_far += MSIZE;
6141 					atomic_subtract_int(&control->length, cp_len);
6142 					control->data = sctp_m_free(m);
6143 					m = control->data;
6144 					/*
6145 					 * been through it all, must hold sb
6146 					 * lock ok to null tail
6147 					 */
6148 					if (control->data == NULL) {
6149 #ifdef INVARIANTS
6150 						if ((control->end_added == 0) ||
6151 						    (TAILQ_NEXT(control, next) == NULL)) {
6152 							/*
6153 							 * If the end is not
6154 							 * added, OR the
6155 							 * next is NOT null
6156 							 * we MUST have the
6157 							 * lock.
6158 							 */
6159 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
6160 								panic("Hmm we don't own the lock?");
6161 							}
6162 						}
6163 #endif
6164 						control->tail_mbuf = NULL;
6165 #ifdef INVARIANTS
6166 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
6167 							panic("end_added, nothing left and no MSG_EOR");
6168 						}
6169 #endif
6170 					}
6171 				}
6172 			} else {
6173 				/* Do we need to trim the mbuf? */
6174 				if (control->spec_flags & M_NOTIFICATION) {
6175 					out_flags |= MSG_NOTIFICATION;
6176 				}
6177 				if ((in_flags & MSG_PEEK) == 0) {
6178 					SCTP_BUF_RESV_UF(m, cp_len);
6179 					SCTP_BUF_LEN(m) -= (int)cp_len;
6180 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6181 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len);
6182 					}
6183 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
6184 					if ((control->do_not_ref_stcb == 0) &&
6185 					    stcb) {
6186 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
6187 					}
6188 					copied_so_far += cp_len;
6189 					freed_so_far += (uint32_t)cp_len;
6190 					freed_so_far += MSIZE;
6191 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6192 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
6193 						    SCTP_LOG_SBRESULT, 0);
6194 					}
6195 					atomic_subtract_int(&control->length, cp_len);
6196 				} else {
6197 					copied_so_far += cp_len;
6198 				}
6199 			}
6200 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
6201 				break;
6202 			}
6203 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6204 			    (control->do_not_ref_stcb == 0) &&
6205 			    (freed_so_far >= rwnd_req)) {
6206 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6207 			}
6208 		}		/* end while(m) */
6209 		/*
6210 		 * At this point we have looked at it all and we either have
6211 		 * a MSG_EOR/or read all the user wants... <OR>
6212 		 * control->length == 0.
6213 		 */
6214 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
6215 			/* we are done with this control */
6216 			if (control->length == 0) {
6217 				if (control->data) {
6218 #ifdef INVARIANTS
6219 					panic("control->data not null at read eor?");
6220 #else
6221 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
6222 					sctp_m_freem(control->data);
6223 					control->data = NULL;
6224 #endif
6225 				}
6226 		done_with_control:
6227 				if (hold_rlock == 0) {
6228 					SCTP_INP_READ_LOCK(inp);
6229 					hold_rlock = 1;
6230 				}
6231 				TAILQ_REMOVE(&inp->read_queue, control, next);
6232 				/* Add back any hiddend data */
6233 				if (control->held_length) {
6234 					held_length = 0;
6235 					control->held_length = 0;
6236 					wakeup_read_socket = 1;
6237 				}
6238 				if (control->aux_data) {
6239 					sctp_m_free(control->aux_data);
6240 					control->aux_data = NULL;
6241 				}
6242 				no_rcv_needed = control->do_not_ref_stcb;
6243 				sctp_free_remote_addr(control->whoFrom);
6244 				control->data = NULL;
6245 #ifdef INVARIANTS
6246 				if (control->on_strm_q) {
6247 					panic("About to free ctl:%p so:%p and its in %d",
6248 					    control, so, control->on_strm_q);
6249 				}
6250 #endif
6251 				sctp_free_a_readq(stcb, control);
6252 				control = NULL;
6253 				if ((freed_so_far >= rwnd_req) &&
6254 				    (no_rcv_needed == 0))
6255 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6256 
6257 			} else {
6258 				/*
6259 				 * The user did not read all of this
6260 				 * message, turn off the returned MSG_EOR
6261 				 * since we are leaving more behind on the
6262 				 * control to read.
6263 				 */
6264 #ifdef INVARIANTS
6265 				if (control->end_added &&
6266 				    (control->data == NULL) &&
6267 				    (control->tail_mbuf == NULL)) {
6268 					panic("Gak, control->length is corrupt?");
6269 				}
6270 #endif
6271 				no_rcv_needed = control->do_not_ref_stcb;
6272 				out_flags &= ~MSG_EOR;
6273 			}
6274 		}
6275 		if (out_flags & MSG_EOR) {
6276 			goto release;
6277 		}
6278 		if ((uio->uio_resid == 0) ||
6279 		    ((in_eeor_mode) &&
6280 		    (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) {
6281 			goto release;
6282 		}
6283 		/*
6284 		 * If I hit here the receiver wants more and this message is
6285 		 * NOT done (pd-api). So two questions. Can we block? if not
6286 		 * we are done. Did the user NOT set MSG_WAITALL?
6287 		 */
6288 		if (block_allowed == 0) {
6289 			goto release;
6290 		}
6291 		/*
6292 		 * We need to wait for more data a few things: - We don't
6293 		 * sbunlock() so we don't get someone else reading. - We
6294 		 * must be sure to account for the case where what is added
6295 		 * is NOT to our control when we wakeup.
6296 		 */
6297 
6298 		/*
6299 		 * Do we need to tell the transport a rwnd update might be
6300 		 * needed before we go to sleep?
6301 		 */
6302 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6303 		    ((freed_so_far >= rwnd_req) &&
6304 		    (control->do_not_ref_stcb == 0) &&
6305 		    (no_rcv_needed == 0))) {
6306 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6307 		}
6308 wait_some_more:
6309 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
6310 			goto release;
6311 		}
6312 
6313 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
6314 			goto release;
6315 
6316 		if (hold_rlock == 1) {
6317 			SCTP_INP_READ_UNLOCK(inp);
6318 			hold_rlock = 0;
6319 		}
6320 		if (hold_sblock == 0) {
6321 			SOCKBUF_LOCK(&so->so_rcv);
6322 			hold_sblock = 1;
6323 		}
6324 		if ((copied_so_far) && (control->length == 0) &&
6325 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
6326 			goto release;
6327 		}
6328 		if (so->so_rcv.sb_cc <= control->held_length) {
6329 			error = sbwait(&so->so_rcv);
6330 			if (error) {
6331 				goto release;
6332 			}
6333 			control->held_length = 0;
6334 		}
6335 		if (hold_sblock) {
6336 			SOCKBUF_UNLOCK(&so->so_rcv);
6337 			hold_sblock = 0;
6338 		}
6339 		if (control->length == 0) {
6340 			/* still nothing here */
6341 			if (control->end_added == 1) {
6342 				/* he aborted, or is done i.e.did a shutdown */
6343 				out_flags |= MSG_EOR;
6344 				if (control->pdapi_aborted) {
6345 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6346 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6347 
6348 					out_flags |= MSG_TRUNC;
6349 				} else {
6350 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6351 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6352 				}
6353 				goto done_with_control;
6354 			}
6355 			if (so->so_rcv.sb_cc > held_length) {
6356 				control->held_length = so->so_rcv.sb_cc;
6357 				held_length = 0;
6358 			}
6359 			goto wait_some_more;
6360 		} else if (control->data == NULL) {
6361 			/*
6362 			 * we must re-sync since data is probably being
6363 			 * added
6364 			 */
6365 			SCTP_INP_READ_LOCK(inp);
6366 			if ((control->length > 0) && (control->data == NULL)) {
6367 				/*
6368 				 * big trouble.. we have the lock and its
6369 				 * corrupt?
6370 				 */
6371 #ifdef INVARIANTS
6372 				panic("Impossible data==NULL length !=0");
6373 #endif
6374 				out_flags |= MSG_EOR;
6375 				out_flags |= MSG_TRUNC;
6376 				control->length = 0;
6377 				SCTP_INP_READ_UNLOCK(inp);
6378 				goto done_with_control;
6379 			}
6380 			SCTP_INP_READ_UNLOCK(inp);
6381 			/* We will fall around to get more data */
6382 		}
6383 		goto get_more_data;
6384 	} else {
6385 		/*-
6386 		 * Give caller back the mbuf chain,
6387 		 * store in uio_resid the length
6388 		 */
6389 		wakeup_read_socket = 0;
6390 		if ((control->end_added == 0) ||
6391 		    (TAILQ_NEXT(control, next) == NULL)) {
6392 			/* Need to get rlock */
6393 			if (hold_rlock == 0) {
6394 				SCTP_INP_READ_LOCK(inp);
6395 				hold_rlock = 1;
6396 			}
6397 		}
6398 		if (control->end_added) {
6399 			out_flags |= MSG_EOR;
6400 			if ((control->do_not_ref_stcb == 0) &&
6401 			    (control->stcb != NULL) &&
6402 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6403 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6404 		}
6405 		if (control->spec_flags & M_NOTIFICATION) {
6406 			out_flags |= MSG_NOTIFICATION;
6407 		}
6408 		uio->uio_resid = control->length;
6409 		*mp = control->data;
6410 		m = control->data;
6411 		while (m) {
6412 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6413 				sctp_sblog(&so->so_rcv,
6414 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6415 			}
6416 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6417 			freed_so_far += (uint32_t)SCTP_BUF_LEN(m);
6418 			freed_so_far += MSIZE;
6419 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6420 				sctp_sblog(&so->so_rcv,
6421 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6422 			}
6423 			m = SCTP_BUF_NEXT(m);
6424 		}
6425 		control->data = control->tail_mbuf = NULL;
6426 		control->length = 0;
6427 		if (out_flags & MSG_EOR) {
6428 			/* Done with this control */
6429 			goto done_with_control;
6430 		}
6431 	}
6432 release:
6433 	if (hold_rlock == 1) {
6434 		SCTP_INP_READ_UNLOCK(inp);
6435 		hold_rlock = 0;
6436 	}
6437 	if (hold_sblock == 1) {
6438 		SOCKBUF_UNLOCK(&so->so_rcv);
6439 		hold_sblock = 0;
6440 	}
6441 
6442 	sbunlock(&so->so_rcv);
6443 	sockbuf_lock = 0;
6444 
6445 release_unlocked:
6446 	if (hold_sblock) {
6447 		SOCKBUF_UNLOCK(&so->so_rcv);
6448 		hold_sblock = 0;
6449 	}
6450 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6451 		if ((freed_so_far >= rwnd_req) &&
6452 		    (control && (control->do_not_ref_stcb == 0)) &&
6453 		    (no_rcv_needed == 0))
6454 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6455 	}
6456 out:
6457 	if (msg_flags) {
6458 		*msg_flags = out_flags;
6459 	}
6460 	if (((out_flags & MSG_EOR) == 0) &&
6461 	    ((in_flags & MSG_PEEK) == 0) &&
6462 	    (sinfo) &&
6463 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6464 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6465 		struct sctp_extrcvinfo *s_extra;
6466 
6467 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6468 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6469 	}
6470 	if (hold_rlock == 1) {
6471 		SCTP_INP_READ_UNLOCK(inp);
6472 	}
6473 	if (hold_sblock) {
6474 		SOCKBUF_UNLOCK(&so->so_rcv);
6475 	}
6476 	if (sockbuf_lock) {
6477 		sbunlock(&so->so_rcv);
6478 	}
6479 
6480 	if (freecnt_applied) {
6481 		/*
6482 		 * The lock on the socket buffer protects us so the free
6483 		 * code will stop. But since we used the socketbuf lock and
6484 		 * the sender uses the tcb_lock to increment, we need to use
6485 		 * the atomic add to the refcnt.
6486 		 */
6487 		if (stcb == NULL) {
6488 #ifdef INVARIANTS
6489 			panic("stcb for refcnt has gone NULL?");
6490 			goto stage_left;
6491 #else
6492 			goto stage_left;
6493 #endif
6494 		}
6495 		/* Save the value back for next time */
6496 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6497 		atomic_add_int(&stcb->asoc.refcnt, -1);
6498 	}
6499 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6500 		if (stcb) {
6501 			sctp_misc_ints(SCTP_SORECV_DONE,
6502 			    freed_so_far,
6503 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6504 			    stcb->asoc.my_rwnd,
6505 			    so->so_rcv.sb_cc);
6506 		} else {
6507 			sctp_misc_ints(SCTP_SORECV_DONE,
6508 			    freed_so_far,
6509 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6510 			    0,
6511 			    so->so_rcv.sb_cc);
6512 		}
6513 	}
6514 stage_left:
6515 	if (wakeup_read_socket) {
6516 		sctp_sorwakeup(inp, so);
6517 	}
6518 	return (error);
6519 }
6520 
6521 
6522 #ifdef SCTP_MBUF_LOGGING
6523 struct mbuf *
6524 sctp_m_free(struct mbuf *m)
6525 {
6526 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6527 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6528 	}
6529 	return (m_free(m));
6530 }
6531 
6532 void
6533 sctp_m_freem(struct mbuf *mb)
6534 {
6535 	while (mb != NULL)
6536 		mb = sctp_m_free(mb);
6537 }
6538 
6539 #endif
6540 
6541 int
6542 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6543 {
6544 	/*
6545 	 * Given a local address. For all associations that holds the
6546 	 * address, request a peer-set-primary.
6547 	 */
6548 	struct sctp_ifa *ifa;
6549 	struct sctp_laddr *wi;
6550 
6551 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6552 	if (ifa == NULL) {
6553 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6554 		return (EADDRNOTAVAIL);
6555 	}
6556 	/*
6557 	 * Now that we have the ifa we must awaken the iterator with this
6558 	 * message.
6559 	 */
6560 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6561 	if (wi == NULL) {
6562 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6563 		return (ENOMEM);
6564 	}
6565 	/* Now incr the count and int wi structure */
6566 	SCTP_INCR_LADDR_COUNT();
6567 	memset(wi, 0, sizeof(*wi));
6568 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6569 	wi->ifa = ifa;
6570 	wi->action = SCTP_SET_PRIM_ADDR;
6571 	atomic_add_int(&ifa->refcount, 1);
6572 
6573 	/* Now add it to the work queue */
6574 	SCTP_WQ_ADDR_LOCK();
6575 	/*
6576 	 * Should this really be a tailq? As it is we will process the
6577 	 * newest first :-0
6578 	 */
6579 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6580 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6581 	    (struct sctp_inpcb *)NULL,
6582 	    (struct sctp_tcb *)NULL,
6583 	    (struct sctp_nets *)NULL);
6584 	SCTP_WQ_ADDR_UNLOCK();
6585 	return (0);
6586 }
6587 
6588 
6589 int
6590 sctp_soreceive(struct socket *so,
6591     struct sockaddr **psa,
6592     struct uio *uio,
6593     struct mbuf **mp0,
6594     struct mbuf **controlp,
6595     int *flagsp)
6596 {
6597 	int error, fromlen;
6598 	uint8_t sockbuf[256];
6599 	struct sockaddr *from;
6600 	struct sctp_extrcvinfo sinfo;
6601 	int filling_sinfo = 1;
6602 	int flags;
6603 	struct sctp_inpcb *inp;
6604 
6605 	inp = (struct sctp_inpcb *)so->so_pcb;
6606 	/* pickup the assoc we are reading from */
6607 	if (inp == NULL) {
6608 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6609 		return (EINVAL);
6610 	}
6611 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6612 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6613 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6614 	    (controlp == NULL)) {
6615 		/* user does not want the sndrcv ctl */
6616 		filling_sinfo = 0;
6617 	}
6618 	if (psa) {
6619 		from = (struct sockaddr *)sockbuf;
6620 		fromlen = sizeof(sockbuf);
6621 		from->sa_len = 0;
6622 	} else {
6623 		from = NULL;
6624 		fromlen = 0;
6625 	}
6626 
6627 	if (filling_sinfo) {
6628 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6629 	}
6630 	if (flagsp != NULL) {
6631 		flags = *flagsp;
6632 	} else {
6633 		flags = 0;
6634 	}
6635 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags,
6636 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6637 	if (flagsp != NULL) {
6638 		*flagsp = flags;
6639 	}
6640 	if (controlp != NULL) {
6641 		/* copy back the sinfo in a CMSG format */
6642 		if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) {
6643 			*controlp = sctp_build_ctl_nchunk(inp,
6644 			    (struct sctp_sndrcvinfo *)&sinfo);
6645 		} else {
6646 			*controlp = NULL;
6647 		}
6648 	}
6649 	if (psa) {
6650 		/* copy back the address info */
6651 		if (from && from->sa_len) {
6652 			*psa = sodupsockaddr(from, M_NOWAIT);
6653 		} else {
6654 			*psa = NULL;
6655 		}
6656 	}
6657 	return (error);
6658 }
6659 
6660 
6661 
6662 
6663 
6664 int
6665 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6666     int totaddr, int *error)
6667 {
6668 	int added = 0;
6669 	int i;
6670 	struct sctp_inpcb *inp;
6671 	struct sockaddr *sa;
6672 	size_t incr = 0;
6673 #ifdef INET
6674 	struct sockaddr_in *sin;
6675 #endif
6676 #ifdef INET6
6677 	struct sockaddr_in6 *sin6;
6678 #endif
6679 
6680 	sa = addr;
6681 	inp = stcb->sctp_ep;
6682 	*error = 0;
6683 	for (i = 0; i < totaddr; i++) {
6684 		switch (sa->sa_family) {
6685 #ifdef INET
6686 		case AF_INET:
6687 			incr = sizeof(struct sockaddr_in);
6688 			sin = (struct sockaddr_in *)sa;
6689 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6690 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6691 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6692 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6693 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6694 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
6695 				*error = EINVAL;
6696 				goto out_now;
6697 			}
6698 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6699 			    SCTP_DONOT_SETSCOPE,
6700 			    SCTP_ADDR_IS_CONFIRMED)) {
6701 				/* assoc gone no un-lock */
6702 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6703 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6704 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
6705 				*error = ENOBUFS;
6706 				goto out_now;
6707 			}
6708 			added++;
6709 			break;
6710 #endif
6711 #ifdef INET6
6712 		case AF_INET6:
6713 			incr = sizeof(struct sockaddr_in6);
6714 			sin6 = (struct sockaddr_in6 *)sa;
6715 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6716 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6717 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6718 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6719 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
6720 				*error = EINVAL;
6721 				goto out_now;
6722 			}
6723 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6724 			    SCTP_DONOT_SETSCOPE,
6725 			    SCTP_ADDR_IS_CONFIRMED)) {
6726 				/* assoc gone no un-lock */
6727 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6728 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6729 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
6730 				*error = ENOBUFS;
6731 				goto out_now;
6732 			}
6733 			added++;
6734 			break;
6735 #endif
6736 		default:
6737 			break;
6738 		}
6739 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6740 	}
6741 out_now:
6742 	return (added);
6743 }
6744 
6745 int
6746 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6747     unsigned int totaddr,
6748     unsigned int *num_v4, unsigned int *num_v6,
6749     unsigned int limit)
6750 {
6751 	struct sockaddr *sa;
6752 	struct sctp_tcb *stcb;
6753 	unsigned int incr, at, i;
6754 
6755 	at = 0;
6756 	sa = addr;
6757 	*num_v6 = *num_v4 = 0;
6758 	/* account and validate addresses */
6759 	if (totaddr == 0) {
6760 		return (EINVAL);
6761 	}
6762 	for (i = 0; i < totaddr; i++) {
6763 		if (at + sizeof(struct sockaddr) > limit) {
6764 			return (EINVAL);
6765 		}
6766 		switch (sa->sa_family) {
6767 #ifdef INET
6768 		case AF_INET:
6769 			incr = (unsigned int)sizeof(struct sockaddr_in);
6770 			if (sa->sa_len != incr) {
6771 				return (EINVAL);
6772 			}
6773 			(*num_v4) += 1;
6774 			break;
6775 #endif
6776 #ifdef INET6
6777 		case AF_INET6:
6778 			{
6779 				struct sockaddr_in6 *sin6;
6780 
6781 				sin6 = (struct sockaddr_in6 *)sa;
6782 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6783 					/* Must be non-mapped for connectx */
6784 					return (EINVAL);
6785 				}
6786 				incr = (unsigned int)sizeof(struct sockaddr_in6);
6787 				if (sa->sa_len != incr) {
6788 					return (EINVAL);
6789 				}
6790 				(*num_v6) += 1;
6791 				break;
6792 			}
6793 #endif
6794 		default:
6795 			return (EINVAL);
6796 		}
6797 		if ((at + incr) > limit) {
6798 			return (EINVAL);
6799 		}
6800 		SCTP_INP_INCR_REF(inp);
6801 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6802 		if (stcb != NULL) {
6803 			SCTP_TCB_UNLOCK(stcb);
6804 			return (EALREADY);
6805 		} else {
6806 			SCTP_INP_DECR_REF(inp);
6807 		}
6808 		at += incr;
6809 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6810 	}
6811 	return (0);
6812 }
6813 
6814 /*
6815  * sctp_bindx(ADD) for one address.
6816  * assumes all arguments are valid/checked by caller.
6817  */
6818 void
6819 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6820     struct sockaddr *sa, sctp_assoc_t assoc_id,
6821     uint32_t vrf_id, int *error, void *p)
6822 {
6823 	struct sockaddr *addr_touse;
6824 #if defined(INET) && defined(INET6)
6825 	struct sockaddr_in sin;
6826 #endif
6827 
6828 	/* see if we're bound all already! */
6829 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6830 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6831 		*error = EINVAL;
6832 		return;
6833 	}
6834 	addr_touse = sa;
6835 #ifdef INET6
6836 	if (sa->sa_family == AF_INET6) {
6837 #ifdef INET
6838 		struct sockaddr_in6 *sin6;
6839 
6840 #endif
6841 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6842 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6843 			*error = EINVAL;
6844 			return;
6845 		}
6846 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6847 			/* can only bind v6 on PF_INET6 sockets */
6848 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6849 			*error = EINVAL;
6850 			return;
6851 		}
6852 #ifdef INET
6853 		sin6 = (struct sockaddr_in6 *)addr_touse;
6854 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6855 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6856 			    SCTP_IPV6_V6ONLY(inp)) {
6857 				/* can't bind v4-mapped on PF_INET sockets */
6858 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6859 				*error = EINVAL;
6860 				return;
6861 			}
6862 			in6_sin6_2_sin(&sin, sin6);
6863 			addr_touse = (struct sockaddr *)&sin;
6864 		}
6865 #endif
6866 	}
6867 #endif
6868 #ifdef INET
6869 	if (sa->sa_family == AF_INET) {
6870 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6871 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6872 			*error = EINVAL;
6873 			return;
6874 		}
6875 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6876 		    SCTP_IPV6_V6ONLY(inp)) {
6877 			/* can't bind v4 on PF_INET sockets */
6878 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6879 			*error = EINVAL;
6880 			return;
6881 		}
6882 	}
6883 #endif
6884 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6885 		if (p == NULL) {
6886 			/* Can't get proc for Net/Open BSD */
6887 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6888 			*error = EINVAL;
6889 			return;
6890 		}
6891 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6892 		return;
6893 	}
6894 	/*
6895 	 * No locks required here since bind and mgmt_ep_sa all do their own
6896 	 * locking. If we do something for the FIX: below we may need to
6897 	 * lock in that case.
6898 	 */
6899 	if (assoc_id == 0) {
6900 		/* add the address */
6901 		struct sctp_inpcb *lep;
6902 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6903 
6904 		/* validate the incoming port */
6905 		if ((lsin->sin_port != 0) &&
6906 		    (lsin->sin_port != inp->sctp_lport)) {
6907 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6908 			*error = EINVAL;
6909 			return;
6910 		} else {
6911 			/* user specified 0 port, set it to existing port */
6912 			lsin->sin_port = inp->sctp_lport;
6913 		}
6914 
6915 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6916 		if (lep != NULL) {
6917 			/*
6918 			 * We must decrement the refcount since we have the
6919 			 * ep already and are binding. No remove going on
6920 			 * here.
6921 			 */
6922 			SCTP_INP_DECR_REF(lep);
6923 		}
6924 		if (lep == inp) {
6925 			/* already bound to it.. ok */
6926 			return;
6927 		} else if (lep == NULL) {
6928 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6929 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6930 			    SCTP_ADD_IP_ADDRESS,
6931 			    vrf_id, NULL);
6932 		} else {
6933 			*error = EADDRINUSE;
6934 		}
6935 		if (*error)
6936 			return;
6937 	} else {
6938 		/*
6939 		 * FIX: decide whether we allow assoc based bindx
6940 		 */
6941 	}
6942 }
6943 
6944 /*
6945  * sctp_bindx(DELETE) for one address.
6946  * assumes all arguments are valid/checked by caller.
6947  */
6948 void
6949 sctp_bindx_delete_address(struct sctp_inpcb *inp,
6950     struct sockaddr *sa, sctp_assoc_t assoc_id,
6951     uint32_t vrf_id, int *error)
6952 {
6953 	struct sockaddr *addr_touse;
6954 #if defined(INET) && defined(INET6)
6955 	struct sockaddr_in sin;
6956 #endif
6957 
6958 	/* see if we're bound all already! */
6959 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6960 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6961 		*error = EINVAL;
6962 		return;
6963 	}
6964 	addr_touse = sa;
6965 #ifdef INET6
6966 	if (sa->sa_family == AF_INET6) {
6967 #ifdef INET
6968 		struct sockaddr_in6 *sin6;
6969 #endif
6970 
6971 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6972 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6973 			*error = EINVAL;
6974 			return;
6975 		}
6976 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6977 			/* can only bind v6 on PF_INET6 sockets */
6978 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6979 			*error = EINVAL;
6980 			return;
6981 		}
6982 #ifdef INET
6983 		sin6 = (struct sockaddr_in6 *)addr_touse;
6984 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6985 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6986 			    SCTP_IPV6_V6ONLY(inp)) {
6987 				/* can't bind mapped-v4 on PF_INET sockets */
6988 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6989 				*error = EINVAL;
6990 				return;
6991 			}
6992 			in6_sin6_2_sin(&sin, sin6);
6993 			addr_touse = (struct sockaddr *)&sin;
6994 		}
6995 #endif
6996 	}
6997 #endif
6998 #ifdef INET
6999 	if (sa->sa_family == AF_INET) {
7000 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
7001 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7002 			*error = EINVAL;
7003 			return;
7004 		}
7005 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
7006 		    SCTP_IPV6_V6ONLY(inp)) {
7007 			/* can't bind v4 on PF_INET sockets */
7008 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
7009 			*error = EINVAL;
7010 			return;
7011 		}
7012 	}
7013 #endif
7014 	/*
7015 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
7016 	 * below is ever changed we may need to lock before calling
7017 	 * association level binding.
7018 	 */
7019 	if (assoc_id == 0) {
7020 		/* delete the address */
7021 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
7022 		    SCTP_DEL_IP_ADDRESS,
7023 		    vrf_id, NULL);
7024 	} else {
7025 		/*
7026 		 * FIX: decide whether we allow assoc based bindx
7027 		 */
7028 	}
7029 }
7030 
7031 /*
7032  * returns the valid local address count for an assoc, taking into account
7033  * all scoping rules
7034  */
7035 int
7036 sctp_local_addr_count(struct sctp_tcb *stcb)
7037 {
7038 	int loopback_scope;
7039 #if defined(INET)
7040 	int ipv4_local_scope, ipv4_addr_legal;
7041 #endif
7042 #if defined (INET6)
7043 	int local_scope, site_scope, ipv6_addr_legal;
7044 #endif
7045 	struct sctp_vrf *vrf;
7046 	struct sctp_ifn *sctp_ifn;
7047 	struct sctp_ifa *sctp_ifa;
7048 	int count = 0;
7049 
7050 	/* Turn on all the appropriate scopes */
7051 	loopback_scope = stcb->asoc.scope.loopback_scope;
7052 #if defined(INET)
7053 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
7054 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
7055 #endif
7056 #if defined(INET6)
7057 	local_scope = stcb->asoc.scope.local_scope;
7058 	site_scope = stcb->asoc.scope.site_scope;
7059 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
7060 #endif
7061 	SCTP_IPI_ADDR_RLOCK();
7062 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
7063 	if (vrf == NULL) {
7064 		/* no vrf, no addresses */
7065 		SCTP_IPI_ADDR_RUNLOCK();
7066 		return (0);
7067 	}
7068 
7069 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
7070 		/*
7071 		 * bound all case: go through all ifns on the vrf
7072 		 */
7073 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
7074 			if ((loopback_scope == 0) &&
7075 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
7076 				continue;
7077 			}
7078 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
7079 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
7080 					continue;
7081 				switch (sctp_ifa->address.sa.sa_family) {
7082 #ifdef INET
7083 				case AF_INET:
7084 					if (ipv4_addr_legal) {
7085 						struct sockaddr_in *sin;
7086 
7087 						sin = &sctp_ifa->address.sin;
7088 						if (sin->sin_addr.s_addr == 0) {
7089 							/*
7090 							 * skip unspecified
7091 							 * addrs
7092 							 */
7093 							continue;
7094 						}
7095 						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
7096 						    &sin->sin_addr) != 0) {
7097 							continue;
7098 						}
7099 						if ((ipv4_local_scope == 0) &&
7100 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
7101 							continue;
7102 						}
7103 						/* count this one */
7104 						count++;
7105 					} else {
7106 						continue;
7107 					}
7108 					break;
7109 #endif
7110 #ifdef INET6
7111 				case AF_INET6:
7112 					if (ipv6_addr_legal) {
7113 						struct sockaddr_in6 *sin6;
7114 
7115 						sin6 = &sctp_ifa->address.sin6;
7116 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
7117 							continue;
7118 						}
7119 						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
7120 						    &sin6->sin6_addr) != 0) {
7121 							continue;
7122 						}
7123 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
7124 							if (local_scope == 0)
7125 								continue;
7126 							if (sin6->sin6_scope_id == 0) {
7127 								if (sa6_recoverscope(sin6) != 0)
7128 									/*
7129 									 *
7130 									 * bad
7131 									 * link
7132 									 *
7133 									 * local
7134 									 *
7135 									 * address
7136 									 */
7137 									continue;
7138 							}
7139 						}
7140 						if ((site_scope == 0) &&
7141 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
7142 							continue;
7143 						}
7144 						/* count this one */
7145 						count++;
7146 					}
7147 					break;
7148 #endif
7149 				default:
7150 					/* TSNH */
7151 					break;
7152 				}
7153 			}
7154 		}
7155 	} else {
7156 		/*
7157 		 * subset bound case
7158 		 */
7159 		struct sctp_laddr *laddr;
7160 
7161 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
7162 		    sctp_nxt_addr) {
7163 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
7164 				continue;
7165 			}
7166 			/* count this one */
7167 			count++;
7168 		}
7169 	}
7170 	SCTP_IPI_ADDR_RUNLOCK();
7171 	return (count);
7172 }
7173 
7174 #if defined(SCTP_LOCAL_TRACE_BUF)
7175 
7176 void
7177 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
7178 {
7179 	uint32_t saveindex, newindex;
7180 
7181 	do {
7182 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
7183 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
7184 			newindex = 1;
7185 		} else {
7186 			newindex = saveindex + 1;
7187 		}
7188 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
7189 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
7190 		saveindex = 0;
7191 	}
7192 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
7193 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
7194 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
7195 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
7196 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
7197 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
7198 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
7199 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
7200 }
7201 
7202 #endif
7203 static void
7204 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
7205     const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
7206 {
7207 	struct ip *iph;
7208 #ifdef INET6
7209 	struct ip6_hdr *ip6;
7210 #endif
7211 	struct mbuf *sp, *last;
7212 	struct udphdr *uhdr;
7213 	uint16_t port;
7214 
7215 	if ((m->m_flags & M_PKTHDR) == 0) {
7216 		/* Can't handle one that is not a pkt hdr */
7217 		goto out;
7218 	}
7219 	/* Pull the src port */
7220 	iph = mtod(m, struct ip *);
7221 	uhdr = (struct udphdr *)((caddr_t)iph + off);
7222 	port = uhdr->uh_sport;
7223 	/*
7224 	 * Split out the mbuf chain. Leave the IP header in m, place the
7225 	 * rest in the sp.
7226 	 */
7227 	sp = m_split(m, off, M_NOWAIT);
7228 	if (sp == NULL) {
7229 		/* Gak, drop packet, we can't do a split */
7230 		goto out;
7231 	}
7232 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
7233 		/* Gak, packet can't have an SCTP header in it - too small */
7234 		m_freem(sp);
7235 		goto out;
7236 	}
7237 	/* Now pull up the UDP header and SCTP header together */
7238 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
7239 	if (sp == NULL) {
7240 		/* Gak pullup failed */
7241 		goto out;
7242 	}
7243 	/* Trim out the UDP header */
7244 	m_adj(sp, sizeof(struct udphdr));
7245 
7246 	/* Now reconstruct the mbuf chain */
7247 	for (last = m; last->m_next; last = last->m_next);
7248 	last->m_next = sp;
7249 	m->m_pkthdr.len += sp->m_pkthdr.len;
7250 	/*
7251 	 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP
7252 	 * checksum and it was valid. Since CSUM_DATA_VALID ==
7253 	 * CSUM_SCTP_VALID this would imply that the HW also verified the
7254 	 * SCTP checksum. Therefore, clear the bit.
7255 	 */
7256 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
7257 	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
7258 	    m->m_pkthdr.len,
7259 	    if_name(m->m_pkthdr.rcvif),
7260 	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
7261 	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
7262 	iph = mtod(m, struct ip *);
7263 	switch (iph->ip_v) {
7264 #ifdef INET
7265 	case IPVERSION:
7266 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
7267 		sctp_input_with_port(m, off, port);
7268 		break;
7269 #endif
7270 #ifdef INET6
7271 	case IPV6_VERSION >> 4:
7272 		ip6 = mtod(m, struct ip6_hdr *);
7273 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
7274 		sctp6_input_with_port(&m, &off, port);
7275 		break;
7276 #endif
7277 	default:
7278 		goto out;
7279 		break;
7280 	}
7281 	return;
7282 out:
7283 	m_freem(m);
7284 }
7285 
7286 #ifdef INET
7287 static void
7288 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
7289 {
7290 	struct ip *outer_ip, *inner_ip;
7291 	struct sctphdr *sh;
7292 	struct icmp *icmp;
7293 	struct udphdr *udp;
7294 	struct sctp_inpcb *inp;
7295 	struct sctp_tcb *stcb;
7296 	struct sctp_nets *net;
7297 	struct sctp_init_chunk *ch;
7298 	struct sockaddr_in src, dst;
7299 	uint8_t type, code;
7300 
7301 	inner_ip = (struct ip *)vip;
7302 	icmp = (struct icmp *)((caddr_t)inner_ip -
7303 	    (sizeof(struct icmp) - sizeof(struct ip)));
7304 	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
7305 	if (ntohs(outer_ip->ip_len) <
7306 	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
7307 		return;
7308 	}
7309 	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
7310 	sh = (struct sctphdr *)(udp + 1);
7311 	memset(&src, 0, sizeof(struct sockaddr_in));
7312 	src.sin_family = AF_INET;
7313 	src.sin_len = sizeof(struct sockaddr_in);
7314 	src.sin_port = sh->src_port;
7315 	src.sin_addr = inner_ip->ip_src;
7316 	memset(&dst, 0, sizeof(struct sockaddr_in));
7317 	dst.sin_family = AF_INET;
7318 	dst.sin_len = sizeof(struct sockaddr_in);
7319 	dst.sin_port = sh->dest_port;
7320 	dst.sin_addr = inner_ip->ip_dst;
7321 	/*
7322 	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
7323 	 * holds our local endpoint address. Thus we reverse the dst and the
7324 	 * src in the lookup.
7325 	 */
7326 	inp = NULL;
7327 	net = NULL;
7328 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7329 	    (struct sockaddr *)&src,
7330 	    &inp, &net, 1,
7331 	    SCTP_DEFAULT_VRFID);
7332 	if ((stcb != NULL) &&
7333 	    (net != NULL) &&
7334 	    (inp != NULL)) {
7335 		/* Check the UDP port numbers */
7336 		if ((udp->uh_dport != net->port) ||
7337 		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7338 			SCTP_TCB_UNLOCK(stcb);
7339 			return;
7340 		}
7341 		/* Check the verification tag */
7342 		if (ntohl(sh->v_tag) != 0) {
7343 			/*
7344 			 * This must be the verification tag used for
7345 			 * sending out packets. We don't consider packets
7346 			 * reflecting the verification tag.
7347 			 */
7348 			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
7349 				SCTP_TCB_UNLOCK(stcb);
7350 				return;
7351 			}
7352 		} else {
7353 			if (ntohs(outer_ip->ip_len) >=
7354 			    sizeof(struct ip) +
7355 			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
7356 				/*
7357 				 * In this case we can check if we got an
7358 				 * INIT chunk and if the initiate tag
7359 				 * matches.
7360 				 */
7361 				ch = (struct sctp_init_chunk *)(sh + 1);
7362 				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
7363 				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
7364 					SCTP_TCB_UNLOCK(stcb);
7365 					return;
7366 				}
7367 			} else {
7368 				SCTP_TCB_UNLOCK(stcb);
7369 				return;
7370 			}
7371 		}
7372 		type = icmp->icmp_type;
7373 		code = icmp->icmp_code;
7374 		if ((type == ICMP_UNREACH) &&
7375 		    (code == ICMP_UNREACH_PORT)) {
7376 			code = ICMP_UNREACH_PROTOCOL;
7377 		}
7378 		sctp_notify(inp, stcb, net, type, code,
7379 		    ntohs(inner_ip->ip_len),
7380 		    (uint32_t)ntohs(icmp->icmp_nextmtu));
7381 	} else {
7382 		if ((stcb == NULL) && (inp != NULL)) {
7383 			/* reduce ref-count */
7384 			SCTP_INP_WLOCK(inp);
7385 			SCTP_INP_DECR_REF(inp);
7386 			SCTP_INP_WUNLOCK(inp);
7387 		}
7388 		if (stcb) {
7389 			SCTP_TCB_UNLOCK(stcb);
7390 		}
7391 	}
7392 	return;
7393 }
7394 #endif
7395 
7396 #ifdef INET6
7397 static void
7398 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
7399 {
7400 	struct ip6ctlparam *ip6cp;
7401 	struct sctp_inpcb *inp;
7402 	struct sctp_tcb *stcb;
7403 	struct sctp_nets *net;
7404 	struct sctphdr sh;
7405 	struct udphdr udp;
7406 	struct sockaddr_in6 src, dst;
7407 	uint8_t type, code;
7408 
7409 	ip6cp = (struct ip6ctlparam *)d;
7410 	/*
7411 	 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid.
7412 	 */
7413 	if (ip6cp->ip6c_m == NULL) {
7414 		return;
7415 	}
7416 	/*
7417 	 * Check if we can safely examine the ports and the verification tag
7418 	 * of the SCTP common header.
7419 	 */
7420 	if (ip6cp->ip6c_m->m_pkthdr.len <
7421 	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
7422 		return;
7423 	}
7424 	/* Copy out the UDP header. */
7425 	memset(&udp, 0, sizeof(struct udphdr));
7426 	m_copydata(ip6cp->ip6c_m,
7427 	    ip6cp->ip6c_off,
7428 	    sizeof(struct udphdr),
7429 	    (caddr_t)&udp);
7430 	/* Copy out the port numbers and the verification tag. */
7431 	memset(&sh, 0, sizeof(struct sctphdr));
7432 	m_copydata(ip6cp->ip6c_m,
7433 	    ip6cp->ip6c_off + sizeof(struct udphdr),
7434 	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
7435 	    (caddr_t)&sh);
7436 	memset(&src, 0, sizeof(struct sockaddr_in6));
7437 	src.sin6_family = AF_INET6;
7438 	src.sin6_len = sizeof(struct sockaddr_in6);
7439 	src.sin6_port = sh.src_port;
7440 	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
7441 	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7442 		return;
7443 	}
7444 	memset(&dst, 0, sizeof(struct sockaddr_in6));
7445 	dst.sin6_family = AF_INET6;
7446 	dst.sin6_len = sizeof(struct sockaddr_in6);
7447 	dst.sin6_port = sh.dest_port;
7448 	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
7449 	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7450 		return;
7451 	}
7452 	inp = NULL;
7453 	net = NULL;
7454 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7455 	    (struct sockaddr *)&src,
7456 	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
7457 	if ((stcb != NULL) &&
7458 	    (net != NULL) &&
7459 	    (inp != NULL)) {
7460 		/* Check the UDP port numbers */
7461 		if ((udp.uh_dport != net->port) ||
7462 		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7463 			SCTP_TCB_UNLOCK(stcb);
7464 			return;
7465 		}
7466 		/* Check the verification tag */
7467 		if (ntohl(sh.v_tag) != 0) {
7468 			/*
7469 			 * This must be the verification tag used for
7470 			 * sending out packets. We don't consider packets
7471 			 * reflecting the verification tag.
7472 			 */
7473 			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
7474 				SCTP_TCB_UNLOCK(stcb);
7475 				return;
7476 			}
7477 		} else {
7478 			if (ip6cp->ip6c_m->m_pkthdr.len >=
7479 			    ip6cp->ip6c_off + sizeof(struct udphdr) +
7480 			    sizeof(struct sctphdr) +
7481 			    sizeof(struct sctp_chunkhdr) +
7482 			    offsetof(struct sctp_init, a_rwnd)) {
7483 				/*
7484 				 * In this case we can check if we got an
7485 				 * INIT chunk and if the initiate tag
7486 				 * matches.
7487 				 */
7488 				uint32_t initiate_tag;
7489 				uint8_t chunk_type;
7490 
7491 				m_copydata(ip6cp->ip6c_m,
7492 				    ip6cp->ip6c_off +
7493 				    sizeof(struct udphdr) +
7494 				    sizeof(struct sctphdr),
7495 				    sizeof(uint8_t),
7496 				    (caddr_t)&chunk_type);
7497 				m_copydata(ip6cp->ip6c_m,
7498 				    ip6cp->ip6c_off +
7499 				    sizeof(struct udphdr) +
7500 				    sizeof(struct sctphdr) +
7501 				    sizeof(struct sctp_chunkhdr),
7502 				    sizeof(uint32_t),
7503 				    (caddr_t)&initiate_tag);
7504 				if ((chunk_type != SCTP_INITIATION) ||
7505 				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
7506 					SCTP_TCB_UNLOCK(stcb);
7507 					return;
7508 				}
7509 			} else {
7510 				SCTP_TCB_UNLOCK(stcb);
7511 				return;
7512 			}
7513 		}
7514 		type = ip6cp->ip6c_icmp6->icmp6_type;
7515 		code = ip6cp->ip6c_icmp6->icmp6_code;
7516 		if ((type == ICMP6_DST_UNREACH) &&
7517 		    (code == ICMP6_DST_UNREACH_NOPORT)) {
7518 			type = ICMP6_PARAM_PROB;
7519 			code = ICMP6_PARAMPROB_NEXTHEADER;
7520 		}
7521 		sctp6_notify(inp, stcb, net, type, code,
7522 		    ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
7523 	} else {
7524 		if ((stcb == NULL) && (inp != NULL)) {
7525 			/* reduce inp's ref-count */
7526 			SCTP_INP_WLOCK(inp);
7527 			SCTP_INP_DECR_REF(inp);
7528 			SCTP_INP_WUNLOCK(inp);
7529 		}
7530 		if (stcb) {
7531 			SCTP_TCB_UNLOCK(stcb);
7532 		}
7533 	}
7534 }
7535 #endif
7536 
7537 void
7538 sctp_over_udp_stop(void)
7539 {
7540 	/*
7541 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
7542 	 * for writting!
7543 	 */
7544 #ifdef INET
7545 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7546 		soclose(SCTP_BASE_INFO(udp4_tun_socket));
7547 		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
7548 	}
7549 #endif
7550 #ifdef INET6
7551 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7552 		soclose(SCTP_BASE_INFO(udp6_tun_socket));
7553 		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
7554 	}
7555 #endif
7556 }
7557 
7558 int
7559 sctp_over_udp_start(void)
7560 {
7561 	uint16_t port;
7562 	int ret;
7563 #ifdef INET
7564 	struct sockaddr_in sin;
7565 #endif
7566 #ifdef INET6
7567 	struct sockaddr_in6 sin6;
7568 #endif
7569 	/*
7570 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
7571 	 * for writting!
7572 	 */
7573 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7574 	if (ntohs(port) == 0) {
7575 		/* Must have a port set */
7576 		return (EINVAL);
7577 	}
7578 #ifdef INET
7579 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7580 		/* Already running -- must stop first */
7581 		return (EALREADY);
7582 	}
7583 #endif
7584 #ifdef INET6
7585 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7586 		/* Already running -- must stop first */
7587 		return (EALREADY);
7588 	}
7589 #endif
7590 #ifdef INET
7591 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7592 	    SOCK_DGRAM, IPPROTO_UDP,
7593 	    curthread->td_ucred, curthread))) {
7594 		sctp_over_udp_stop();
7595 		return (ret);
7596 	}
7597 	/* Call the special UDP hook. */
7598 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7599 	    sctp_recv_udp_tunneled_packet,
7600 	    sctp_recv_icmp_tunneled_packet,
7601 	    NULL))) {
7602 		sctp_over_udp_stop();
7603 		return (ret);
7604 	}
7605 	/* Ok, we have a socket, bind it to the port. */
7606 	memset(&sin, 0, sizeof(struct sockaddr_in));
7607 	sin.sin_len = sizeof(struct sockaddr_in);
7608 	sin.sin_family = AF_INET;
7609 	sin.sin_port = htons(port);
7610 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7611 	    (struct sockaddr *)&sin, curthread))) {
7612 		sctp_over_udp_stop();
7613 		return (ret);
7614 	}
7615 #endif
7616 #ifdef INET6
7617 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7618 	    SOCK_DGRAM, IPPROTO_UDP,
7619 	    curthread->td_ucred, curthread))) {
7620 		sctp_over_udp_stop();
7621 		return (ret);
7622 	}
7623 	/* Call the special UDP hook. */
7624 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7625 	    sctp_recv_udp_tunneled_packet,
7626 	    sctp_recv_icmp6_tunneled_packet,
7627 	    NULL))) {
7628 		sctp_over_udp_stop();
7629 		return (ret);
7630 	}
7631 	/* Ok, we have a socket, bind it to the port. */
7632 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7633 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7634 	sin6.sin6_family = AF_INET6;
7635 	sin6.sin6_port = htons(port);
7636 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7637 	    (struct sockaddr *)&sin6, curthread))) {
7638 		sctp_over_udp_stop();
7639 		return (ret);
7640 	}
7641 #endif
7642 	return (0);
7643 }
7644 
7645 /*
7646  * sctp_min_mtu ()returns the minimum of all non-zero arguments.
7647  * If all arguments are zero, zero is returned.
7648  */
7649 uint32_t
7650 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
7651 {
7652 	if (mtu1 > 0) {
7653 		if (mtu2 > 0) {
7654 			if (mtu3 > 0) {
7655 				return (min(mtu1, min(mtu2, mtu3)));
7656 			} else {
7657 				return (min(mtu1, mtu2));
7658 			}
7659 		} else {
7660 			if (mtu3 > 0) {
7661 				return (min(mtu1, mtu3));
7662 			} else {
7663 				return (mtu1);
7664 			}
7665 		}
7666 	} else {
7667 		if (mtu2 > 0) {
7668 			if (mtu3 > 0) {
7669 				return (min(mtu2, mtu3));
7670 			} else {
7671 				return (mtu2);
7672 			}
7673 		} else {
7674 			return (mtu3);
7675 		}
7676 	}
7677 }
7678 
7679 void
7680 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
7681 {
7682 	struct in_conninfo inc;
7683 
7684 	memset(&inc, 0, sizeof(struct in_conninfo));
7685 	inc.inc_fibnum = fibnum;
7686 	switch (addr->sa.sa_family) {
7687 #ifdef INET
7688 	case AF_INET:
7689 		inc.inc_faddr = addr->sin.sin_addr;
7690 		break;
7691 #endif
7692 #ifdef INET6
7693 	case AF_INET6:
7694 		inc.inc_flags |= INC_ISIPV6;
7695 		inc.inc6_faddr = addr->sin6.sin6_addr;
7696 		break;
7697 #endif
7698 	default:
7699 		return;
7700 	}
7701 	tcp_hc_updatemtu(&inc, (u_long)mtu);
7702 }
7703 
7704 uint32_t
7705 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
7706 {
7707 	struct in_conninfo inc;
7708 
7709 	memset(&inc, 0, sizeof(struct in_conninfo));
7710 	inc.inc_fibnum = fibnum;
7711 	switch (addr->sa.sa_family) {
7712 #ifdef INET
7713 	case AF_INET:
7714 		inc.inc_faddr = addr->sin.sin_addr;
7715 		break;
7716 #endif
7717 #ifdef INET6
7718 	case AF_INET6:
7719 		inc.inc_flags |= INC_ISIPV6;
7720 		inc.inc6_faddr = addr->sin6.sin6_addr;
7721 		break;
7722 #endif
7723 	default:
7724 		return (0);
7725 	}
7726 	return ((uint32_t)tcp_hc_getmtu(&inc));
7727 }
7728 
7729 void
7730 sctp_set_state(struct sctp_tcb *stcb, int new_state)
7731 {
7732 #if defined(KDTRACE_HOOKS)
7733 	int old_state = stcb->asoc.state;
7734 #endif
7735 
7736 	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
7737 	    ("sctp_set_state: Can't set substate (new_state = %x)",
7738 	    new_state));
7739 	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
7740 	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
7741 	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
7742 	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
7743 		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
7744 	}
7745 #if defined(KDTRACE_HOOKS)
7746 	if (((old_state & SCTP_STATE_MASK) != new_state) &&
7747 	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
7748 	    (new_state == SCTP_STATE_INUSE))) {
7749 		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
7750 	}
7751 #endif
7752 }
7753 
7754 void
7755 sctp_add_substate(struct sctp_tcb *stcb, int substate)
7756 {
7757 #if defined(KDTRACE_HOOKS)
7758 	int old_state = stcb->asoc.state;
7759 #endif
7760 
7761 	KASSERT((substate & SCTP_STATE_MASK) == 0,
7762 	    ("sctp_add_substate: Can't set state (substate = %x)",
7763 	    substate));
7764 	stcb->asoc.state |= substate;
7765 #if defined(KDTRACE_HOOKS)
7766 	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
7767 	    ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
7768 	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
7769 	    ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
7770 		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
7771 	}
7772 #endif
7773 }
7774