xref: /freebsd/sys/netinet/sctputil.c (revision f2530c80db7b29b95368fce956b3a778f096b368)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #include <netinet6/sctp6_var.h>
45 #endif
46 #include <netinet/sctp_header.h>
47 #include <netinet/sctp_output.h>
48 #include <netinet/sctp_uio.h>
49 #include <netinet/sctp_timer.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_auth.h>
52 #include <netinet/sctp_asconf.h>
53 #include <netinet/sctp_bsd_addr.h>
54 #include <netinet/sctp_kdtrace.h>
55 #if defined(INET6) || defined(INET)
56 #include <netinet/tcp_var.h>
57 #endif
58 #include <netinet/udp.h>
59 #include <netinet/udp_var.h>
60 #include <sys/proc.h>
61 #ifdef INET6
62 #include <netinet/icmp6.h>
63 #endif
64 
65 
66 #ifndef KTR_SCTP
67 #define KTR_SCTP KTR_SUBSYS
68 #endif
69 
70 extern const struct sctp_cc_functions sctp_cc_functions[];
71 extern const struct sctp_ss_functions sctp_ss_functions[];
72 
/*
 * Log a socket-buffer accounting event (sb_cc, the association's view
 * of it, and the increment applied) to the local trace buffer.
 */
void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Zero the entry first, as rto_logging() and friends do, so the
	 * union words read back through x.misc below never contain
	 * uninitialized stack memory.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
95 
/*
 * Log an endpoint/association close-path event.  'loc' identifies the
 * call site.  Assumes inp != NULL (inp->sctp_flags is read
 * unconditionally); stcb may be NULL.
 */
void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so no uninitialized bytes reach the x.misc words. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.close.inp = (void *)inp;
	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
	if (stcb) {
		sctp_clog.x.close.stcb = (void *)stcb;
		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
	} else {
		sctp_clog.x.close.stcb = 0;
		sctp_clog.x.close.state = 0;
	}
	sctp_clog.x.close.loc = loc;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CLOSE,
	    0,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
121 
/*
 * Log an RTT observation for the given destination address.
 */
void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log clog;

	memset(&clog, 0, sizeof(clog));
	clog.x.rto.net = (void *)net;
	/* net->rtt is scaled down by 1000 for the log record. */
	clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT,
	    from,
	    clog.x.misc.log1,
	    clog.x.misc.log2,
	    clog.x.misc.log3,
	    clog.x.misc.log4);
#endif
}
140 
/*
 * Log a stream-delivery event using explicit TSN/SSN/stream values
 * (alternate form of sctp_log_strm_del() for callers without a
 * queued-to-read structure).
 */
void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so no uninitialized bytes reach the x.misc words. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.strlog.stcb = stcb;
	sctp_clog.x.strlog.n_tsn = tsn;
	sctp_clog.x.strlog.n_sseq = sseq;
	sctp_clog.x.strlog.e_tsn = 0;
	sctp_clog.x.strlog.e_sseq = 0;
	sctp_clog.x.strlog.strm = stream;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
162 
/*
 * Log a Nagle-algorithm decision point with the association's current
 * flight and queue occupancy.  Assumes stcb != NULL.
 */
void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so no uninitialized bytes reach the x.misc words. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
183 
/*
 * Log a SACK processing event: old and new cumulative ack, highest TSN,
 * and the number of gap-ack and duplicate-TSN blocks.
 */
void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so no uninitialized bytes reach the x.misc words. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.sack.cumack = cumack;
	sctp_clog.x.sack.oldcumack = old_cumack;
	sctp_clog.x.sack.tsn = tsn;
	sctp_clog.x.sack.numGaps = gaps;
	sctp_clog.x.sack.numDups = dups;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SACK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
204 
/*
 * Log a mapping-array event (array base, cumulative TSN, highest TSN).
 */
void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log clog;

	memset(&clog, 0, sizeof(clog));
	clog.x.map.high = high;
	clog.x.map.cum = cum;
	clog.x.map.base = map;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP,
	    from,
	    clog.x.misc.log1,
	    clog.x.misc.log2,
	    clog.x.misc.log3,
	    clog.x.misc.log4);
#endif
}
224 
/*
 * Log a fast-retransmit event (largest seen, largest newly acked, and
 * the TSN being considered).
 */
void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log clog;

	memset(&clog, 0, sizeof(clog));
	clog.x.fr.tsn = tsn;
	clog.x.fr.largest_new_tsn = biggest_new_tsn;
	clog.x.fr.largest_tsn = biggest_tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR,
	    from,
	    clog.x.misc.log1,
	    clog.x.misc.log2,
	    clog.x.misc.log3,
	    clog.x.misc.log4);
#endif
}
244 
245 #ifdef SCTP_MBUF_LOGGING
/*
 * Log one mbuf: flags, length, data pointer, and — when the mbuf uses
 * external storage — its external base address and reference count.
 */
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so no uninitialized bytes reach the x.misc words. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
272 
273 void
274 sctp_log_mbc(struct mbuf *m, int from)
275 {
276 	struct mbuf *mat;
277 
278 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
279 		sctp_log_mb(mat, from);
280 	}
281 }
282 #endif
283 
/*
 * Log a stream-delivery event for 'control', optionally recording the
 * queue position 'poschk' it was compared against.  Refuses (with a
 * console message) to log a NULL control.
 */
void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	/* Zero first so no uninitialized bytes reach the x.misc words. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
314 
/*
 * Log a congestion-window event.  Queue counts are capped at 255 to
 * fit the 8-bit log fields; per-net fields are only filled in when
 * 'net' is non-NULL.  For SCTP_CWNDLOG_PRESEND the peer's rwnd is
 * logged in the meets_pseudo_cumack slot.
 */
void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Zero first: when net is NULL the cwnd/flight/pseudo-cumack
	 * fields below are never assigned, and without this the x.misc
	 * words would trace uninitialized stack memory.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

	if (net) {
		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
		sctp_clog.x.cwnd.inflight = net->flight_size;
		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
	}
	if (SCTP_CWNDLOG_PRESEND == from) {
		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
	}
	sctp_clog.x.cwnd.cwnd_augment = augment;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
351 
/*
 * Snapshot which SCTP-related locks the current thread owns and emit
 * the result to the trace buffer.  NULL inp and/or stcb are tolerated;
 * states that cannot be probed are recorded as SCTP_LOCK_UNKNOWN.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	/* Association lock state. */
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	/* Endpoint and endpoint-create lock states. */
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	/* Global endpoint-info lock: write-ownership check. */
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both sample
		 * so_rcv.sb_mtx here — confirm whether sock_lock was meant
		 * to probe a distinct socket-level lock.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
397 
/*
 * Log a max-burst event for 'net'.  Queue counts are capped at 255 so
 * they fit the 8-bit log fields.  Assumes net != NULL (flight_size is
 * read unconditionally).
 */
void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log clog;

	memset(&clog, 0, sizeof(clog));
	clog.x.cwnd.net = net;
	clog.x.cwnd.cwnd_new_value = error;
	clog.x.cwnd.inflight = net->flight_size;
	clog.x.cwnd.cwnd_augment = burst;
	clog.x.cwnd.cnt_in_send =
	    (stcb->asoc.send_queue_cnt > 255) ? 255 : stcb->asoc.send_queue_cnt;
	clog.x.cwnd.cnt_in_str =
	    (stcb->asoc.stream_queue_cnt > 255) ? 255 : stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    clog.x.misc.log1,
	    clog.x.misc.log2,
	    clog.x.misc.log3,
	    clog.x.misc.log4);
#endif
}
426 
/*
 * Log the peer's receive window versus a pending send (size plus
 * per-chunk overhead).
 */
void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so no uninitialized bytes reach the x.misc words. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = snd_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = 0;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
446 
/*
 * Log an update of the peer's receive window: current rwnd, flight
 * size, overhead, and the newly computed rwnd value.
 */
void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so no uninitialized bytes reach the x.misc words. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = flight_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
466 
467 #ifdef SCTP_MBCNT_LOGGING
/*
 * Log output-queue byte and mbuf accounting changes (total queued
 * bytes, the byte delta, total mbuf-count bytes, and the mbuf delta).
 */
static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so no uninitialized bytes reach the x.misc words. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.mbcnt.total_queue_size = total_oq;
	sctp_clog.x.mbcnt.size_change = book;
	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBCNT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
487 #endif
488 
/*
 * Emit four caller-supplied 32-bit values to the trace stream under the
 * generic "misc" event.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT, from, a, b, c, d);
#endif
}
499 
/*
 * Log a socket-wakeup event with the association's flight/queue state,
 * the deferred-wakeup PCB flags, and the socket send-buffer flags.
 * Assumes stcb != NULL; tolerates a NULL stcb->sctp_socket.
 */
void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so no uninitialized bytes reach the x.misc words. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.wake.stcb = (void *)stcb;
	sctp_clog.x.wake.wake_cnt = wake_cnt;
	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

	/* Clamp counts to the 8-bit log fields. */
	if (stcb->asoc.stream_queue_cnt < 0xff)
		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
	else
		sctp_clog.x.wake.stream_qcnt = 0xff;

	if (stcb->asoc.chunks_on_out_queue < 0xff)
		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
	else
		sctp_clog.x.wake.chunks_on_oque = 0xff;

	sctp_clog.x.wake.sctpflags = 0;
	/* set in the deferred mode stuff */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
		sctp_clog.x.wake.sctpflags |= 1;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
		sctp_clog.x.wake.sctpflags |= 2;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
		sctp_clog.x.wake.sctpflags |= 4;
	/* what about the sb */
	if (stcb->sctp_socket) {
		struct socket *so = stcb->sctp_socket;

		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
	} else {
		/* No socket: record a sentinel. */
		sctp_clog.x.wake.sbflags = 0xff;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_WAKE,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
547 
548 void
549 sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
550 {
551 #if defined(SCTP_LOCAL_TRACE_BUF)
552 	struct sctp_cwnd_log sctp_clog;
553 
554 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
555 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
556 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
557 	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
558 	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
559 	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
560 	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
561 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
562 	    SCTP_LOG_EVENT_BLOCK,
563 	    from,
564 	    sctp_clog.x.misc.log1,
565 	    sctp_clog.x.misc.log2,
566 	    sctp_clog.x.misc.log3,
567 	    sctp_clog.x.misc.log4);
568 #endif
569 }
570 
/*
 * Stub for exporting the trace log to user space; always reports
 * success.  Traces are expected to be retrieved with ktrdump instead
 * (see the comment below).
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
577 
578 #ifdef SCTP_AUDITING_ENABLED
/* Ring buffer of (event, detail) byte pairs written by sctp_audit_log(). */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
/* Next slot to write in sctp_audit_data[]; wraps to 0 at SCTP_AUDIT_SIZE. */
static int sctp_audit_indx = 0;
581 
582 static
583 void
584 sctp_print_audit_report(void)
585 {
586 	int i;
587 	int cnt;
588 
589 	cnt = 0;
590 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
591 		if ((sctp_audit_data[i][0] == 0xe0) &&
592 		    (sctp_audit_data[i][1] == 0x01)) {
593 			cnt = 0;
594 			SCTP_PRINTF("\n");
595 		} else if (sctp_audit_data[i][0] == 0xf0) {
596 			cnt = 0;
597 			SCTP_PRINTF("\n");
598 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
599 		    (sctp_audit_data[i][1] == 0x01)) {
600 			SCTP_PRINTF("\n");
601 			cnt = 0;
602 		}
603 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
604 		    (uint32_t)sctp_audit_data[i][1]);
605 		cnt++;
606 		if ((cnt % 14) == 0)
607 			SCTP_PRINTF("\n");
608 	}
609 	for (i = 0; i < sctp_audit_indx; i++) {
610 		if ((sctp_audit_data[i][0] == 0xe0) &&
611 		    (sctp_audit_data[i][1] == 0x01)) {
612 			cnt = 0;
613 			SCTP_PRINTF("\n");
614 		} else if (sctp_audit_data[i][0] == 0xf0) {
615 			cnt = 0;
616 			SCTP_PRINTF("\n");
617 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
618 		    (sctp_audit_data[i][1] == 0x01)) {
619 			SCTP_PRINTF("\n");
620 			cnt = 0;
621 		}
622 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
623 		    (uint32_t)sctp_audit_data[i][1]);
624 		cnt++;
625 		if ((cnt % 14) == 0)
626 			SCTP_PRINTF("\n");
627 	}
628 	SCTP_PRINTF("\n");
629 }
630 
/*
 * Consistency audit of an association's retransmission bookkeeping.
 * Leaves a trail of marker bytes in sctp_audit_data[] and, whenever a
 * cached counter disagrees with a recount of the actual queues,
 * repairs the counter, sets 'rep', and finally dumps the audit ring
 * via sctp_print_audit_report().
 * NOTE(review): the 'net' parameter is not used in this function.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* Entry marker 0xAA with the caller-supplied location code. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: no endpoint supplied; nothing to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: no association supplied; nothing to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: record the cached retransmission count (low byte). */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/* Recount the sent queue: retransmit-marked vs. in-flight chunks. */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: cached retran count was wrong; repair it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		/* 0xA2: record the corrected count. */
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: cached total flight size was wrong; repair it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: cached in-flight chunk count was wrong. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Cross-check: per-destination flight sizes must sum to the total. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA3: per-net flight sizes disagree with the total. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
760 
761 void
762 sctp_audit_log(uint8_t ev, uint8_t fd)
763 {
764 
765 	sctp_audit_data[sctp_audit_indx][0] = ev;
766 	sctp_audit_data[sctp_audit_indx][1] = fd;
767 	sctp_audit_indx++;
768 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
769 		sctp_audit_indx = 0;
770 	}
771 }
772 
773 #endif
774 
775 /*
776  * sctp_stop_timers_for_shutdown() should be called
777  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
778  * state to make sure that all timers are stopped.
779  */
780 void
781 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
782 {
783 	struct sctp_association *asoc;
784 	struct sctp_nets *net;
785 
786 	asoc = &stcb->asoc;
787 
788 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
789 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
790 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
791 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
792 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
793 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
794 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
795 	}
796 }
797 
798 /*
799  * A list of sizes based on typical mtu's, used only if next hop size not
800  * returned. These values MUST be multiples of 4 and MUST be ordered.
801  */
static uint32_t sctp_mtu_sizes[] = {
	68,			/* IPv4 minimum MTU */
	296,
	508,
	512,
	544,
	576,			/* IPv4 minimum reassembly size */
	1004,
	1492,			/* PPPoE over Ethernet */
	1500,			/* Ethernet */
	1536,
	2000,
	2048,
	4352,			/* FDDI */
	4464,
	8166,
	17912,
	32000,
	65532
};
822 
823 /*
824  * Return the largest MTU in sctp_mtu_sizes smaller than val.
825  * If val is smaller than the minimum, just return the largest
826  * multiple of 4 smaller or equal to val.
827  * Ensure that the result is a multiple of 4.
828  */
829 uint32_t
830 sctp_get_prev_mtu(uint32_t val)
831 {
832 	uint32_t i;
833 
834 	val &= 0xfffffffc;
835 	if (val <= sctp_mtu_sizes[0]) {
836 		return (val);
837 	}
838 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
839 		if (val <= sctp_mtu_sizes[i]) {
840 			break;
841 		}
842 	}
843 	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
844 	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
845 	return (sctp_mtu_sizes[i - 1]);
846 }
847 
848 /*
849  * Return the smallest MTU in sctp_mtu_sizes larger than val.
850  * If val is larger than the maximum, just return the largest multiple of 4 smaller
851  * or equal to val.
852  * Ensure that the result is a multiple of 4.
853  */
854 uint32_t
855 sctp_get_next_mtu(uint32_t val)
856 {
857 	/* select another MTU that is just bigger than this one */
858 	uint32_t i;
859 
860 	val &= 0xfffffffc;
861 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
862 		if (val < sctp_mtu_sizes[i]) {
863 			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
864 			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
865 			return (sctp_mtu_sizes[i]);
866 		}
867 	}
868 	return (val);
869 }
870 
/*
 * Refill the endpoint's random_store by HMAC-ing its random_numbers
 * with a monotonically increasing counter, and reset the read position
 * (store_at) to the beginning.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}
889 
890 uint32_t
891 sctp_select_initial_TSN(struct sctp_pcb *inp)
892 {
893 	/*
894 	 * A true implementation should use random selection process to get
895 	 * the initial stream sequence number, using RFC1750 as a good
896 	 * guideline
897 	 */
898 	uint32_t x, *xp;
899 	uint8_t *p;
900 	int store_at, new_store;
901 
902 	if (inp->initial_sequence_debug != 0) {
903 		uint32_t ret;
904 
905 		ret = inp->initial_sequence_debug;
906 		inp->initial_sequence_debug++;
907 		return (ret);
908 	}
909 retry:
910 	store_at = inp->store_at;
911 	new_store = store_at + sizeof(uint32_t);
912 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
913 		new_store = 0;
914 	}
915 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
916 		goto retry;
917 	}
918 	if (new_store == 0) {
919 		/* Refill the random store */
920 		sctp_fill_random_store(inp);
921 	}
922 	p = &inp->random_store[store_at];
923 	xp = (uint32_t *)p;
924 	x = *xp;
925 	return (x);
926 }
927 
928 uint32_t
929 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
930 {
931 	uint32_t x;
932 	struct timeval now;
933 
934 	if (check) {
935 		(void)SCTP_GETTIME_TIMEVAL(&now);
936 	}
937 	for (;;) {
938 		x = sctp_select_initial_TSN(&inp->sctp_ep);
939 		if (x == 0) {
940 			/* we never use 0 */
941 			continue;
942 		}
943 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
944 			break;
945 		}
946 	}
947 	return (x);
948 }
949 
950 int32_t
951 sctp_map_assoc_state(int kernel_state)
952 {
953 	int32_t user_state;
954 
955 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
956 		user_state = SCTP_CLOSED;
957 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
958 		user_state = SCTP_SHUTDOWN_PENDING;
959 	} else {
960 		switch (kernel_state & SCTP_STATE_MASK) {
961 		case SCTP_STATE_EMPTY:
962 			user_state = SCTP_CLOSED;
963 			break;
964 		case SCTP_STATE_INUSE:
965 			user_state = SCTP_CLOSED;
966 			break;
967 		case SCTP_STATE_COOKIE_WAIT:
968 			user_state = SCTP_COOKIE_WAIT;
969 			break;
970 		case SCTP_STATE_COOKIE_ECHOED:
971 			user_state = SCTP_COOKIE_ECHOED;
972 			break;
973 		case SCTP_STATE_OPEN:
974 			user_state = SCTP_ESTABLISHED;
975 			break;
976 		case SCTP_STATE_SHUTDOWN_SENT:
977 			user_state = SCTP_SHUTDOWN_SENT;
978 			break;
979 		case SCTP_STATE_SHUTDOWN_RECEIVED:
980 			user_state = SCTP_SHUTDOWN_RECEIVED;
981 			break;
982 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
983 			user_state = SCTP_SHUTDOWN_ACK_SENT;
984 			break;
985 		default:
986 			user_state = SCTP_CLOSED;
987 			break;
988 		}
989 	}
990 	return (user_state);
991 }
992 
993 int
994 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
995     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
996 {
997 	struct sctp_association *asoc;
998 
999 	/*
1000 	 * Anything set to zero is taken care of by the allocation routine's
1001 	 * bzero
1002 	 */
1003 
1004 	/*
1005 	 * Up front select what scoping to apply on addresses I tell my peer
1006 	 * Not sure what to do with these right now, we will need to come up
1007 	 * with a way to set them. We may need to pass them through from the
1008 	 * caller in the sctp_aloc_assoc() function.
1009 	 */
1010 	int i;
1011 #if defined(SCTP_DETAILED_STR_STATS)
1012 	int j;
1013 #endif
1014 
1015 	asoc = &stcb->asoc;
1016 	/* init all variables to a known value. */
1017 	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
1018 	asoc->max_burst = inp->sctp_ep.max_burst;
1019 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
1020 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
1021 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
1022 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
1023 	asoc->ecn_supported = inp->ecn_supported;
1024 	asoc->prsctp_supported = inp->prsctp_supported;
1025 	asoc->idata_supported = inp->idata_supported;
1026 	asoc->auth_supported = inp->auth_supported;
1027 	asoc->asconf_supported = inp->asconf_supported;
1028 	asoc->reconfig_supported = inp->reconfig_supported;
1029 	asoc->nrsack_supported = inp->nrsack_supported;
1030 	asoc->pktdrop_supported = inp->pktdrop_supported;
1031 	asoc->idata_supported = inp->idata_supported;
1032 	asoc->sctp_cmt_pf = (uint8_t)0;
1033 	asoc->sctp_frag_point = inp->sctp_frag_point;
1034 	asoc->sctp_features = inp->sctp_features;
1035 	asoc->default_dscp = inp->sctp_ep.default_dscp;
1036 	asoc->max_cwnd = inp->max_cwnd;
1037 #ifdef INET6
1038 	if (inp->sctp_ep.default_flowlabel) {
1039 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
1040 	} else {
1041 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
1042 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
1043 			asoc->default_flowlabel &= 0x000fffff;
1044 			asoc->default_flowlabel |= 0x80000000;
1045 		} else {
1046 			asoc->default_flowlabel = 0;
1047 		}
1048 	}
1049 #endif
1050 	asoc->sb_send_resv = 0;
1051 	if (override_tag) {
1052 		asoc->my_vtag = override_tag;
1053 	} else {
1054 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1055 	}
1056 	/* Get the nonce tags */
1057 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1058 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1059 	asoc->vrf_id = vrf_id;
1060 
1061 #ifdef SCTP_ASOCLOG_OF_TSNS
1062 	asoc->tsn_in_at = 0;
1063 	asoc->tsn_out_at = 0;
1064 	asoc->tsn_in_wrapped = 0;
1065 	asoc->tsn_out_wrapped = 0;
1066 	asoc->cumack_log_at = 0;
1067 	asoc->cumack_log_atsnt = 0;
1068 #endif
1069 #ifdef SCTP_FS_SPEC_LOG
1070 	asoc->fs_index = 0;
1071 #endif
1072 	asoc->refcnt = 0;
1073 	asoc->assoc_up_sent = 0;
1074 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1075 	    sctp_select_initial_TSN(&inp->sctp_ep);
1076 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1077 	/* we are optimisitic here */
1078 	asoc->peer_supports_nat = 0;
1079 	asoc->sent_queue_retran_cnt = 0;
1080 
1081 	/* for CMT */
1082 	asoc->last_net_cmt_send_started = NULL;
1083 
1084 	/* This will need to be adjusted */
1085 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1086 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1087 	asoc->asconf_seq_in = asoc->last_acked_seq;
1088 
1089 	/* here we are different, we hold the next one we expect */
1090 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1091 
1092 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1093 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1094 
1095 	asoc->default_mtu = inp->sctp_ep.default_mtu;
1096 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1097 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1098 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1099 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1100 	asoc->free_chunk_cnt = 0;
1101 
1102 	asoc->iam_blocking = 0;
1103 	asoc->context = inp->sctp_context;
1104 	asoc->local_strreset_support = inp->local_strreset_support;
1105 	asoc->def_send = inp->def_send;
1106 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1107 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1108 	asoc->pr_sctp_cnt = 0;
1109 	asoc->total_output_queue_size = 0;
1110 
1111 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1112 		asoc->scope.ipv6_addr_legal = 1;
1113 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1114 			asoc->scope.ipv4_addr_legal = 1;
1115 		} else {
1116 			asoc->scope.ipv4_addr_legal = 0;
1117 		}
1118 	} else {
1119 		asoc->scope.ipv6_addr_legal = 0;
1120 		asoc->scope.ipv4_addr_legal = 1;
1121 	}
1122 
1123 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1124 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1125 
1126 	asoc->smallest_mtu = inp->sctp_frag_point;
1127 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1128 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1129 
1130 	asoc->stream_locked_on = 0;
1131 	asoc->ecn_echo_cnt_onq = 0;
1132 	asoc->stream_locked = 0;
1133 
1134 	asoc->send_sack = 1;
1135 
1136 	LIST_INIT(&asoc->sctp_restricted_addrs);
1137 
1138 	TAILQ_INIT(&asoc->nets);
1139 	TAILQ_INIT(&asoc->pending_reply_queue);
1140 	TAILQ_INIT(&asoc->asconf_ack_sent);
1141 	/* Setup to fill the hb random cache at first HB */
1142 	asoc->hb_random_idx = 4;
1143 
1144 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1145 
1146 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1147 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1148 
1149 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1150 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1151 
1152 	/*
1153 	 * Now the stream parameters, here we allocate space for all streams
1154 	 * that we request by default.
1155 	 */
1156 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1157 	    o_strms;
1158 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1159 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1160 	    SCTP_M_STRMO);
1161 	if (asoc->strmout == NULL) {
1162 		/* big trouble no memory */
1163 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1164 		return (ENOMEM);
1165 	}
1166 	for (i = 0; i < asoc->streamoutcnt; i++) {
1167 		/*
1168 		 * inbound side must be set to 0xffff, also NOTE when we get
1169 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1170 		 * count (streamoutcnt) but first check if we sent to any of
1171 		 * the upper streams that were dropped (if some were). Those
1172 		 * that were dropped must be notified to the upper layer as
1173 		 * failed to send.
1174 		 */
1175 		asoc->strmout[i].next_mid_ordered = 0;
1176 		asoc->strmout[i].next_mid_unordered = 0;
1177 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1178 		asoc->strmout[i].chunks_on_queues = 0;
1179 #if defined(SCTP_DETAILED_STR_STATS)
1180 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1181 			asoc->strmout[i].abandoned_sent[j] = 0;
1182 			asoc->strmout[i].abandoned_unsent[j] = 0;
1183 		}
1184 #else
1185 		asoc->strmout[i].abandoned_sent[0] = 0;
1186 		asoc->strmout[i].abandoned_unsent[0] = 0;
1187 #endif
1188 		asoc->strmout[i].sid = i;
1189 		asoc->strmout[i].last_msg_incomplete = 0;
1190 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1191 		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1192 	}
1193 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1194 
1195 	/* Now the mapping array */
1196 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1197 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1198 	    SCTP_M_MAP);
1199 	if (asoc->mapping_array == NULL) {
1200 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1201 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1202 		return (ENOMEM);
1203 	}
1204 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1205 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1206 	    SCTP_M_MAP);
1207 	if (asoc->nr_mapping_array == NULL) {
1208 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1209 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1210 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1211 		return (ENOMEM);
1212 	}
1213 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1214 
1215 	/* Now the init of the other outqueues */
1216 	TAILQ_INIT(&asoc->free_chunks);
1217 	TAILQ_INIT(&asoc->control_send_queue);
1218 	TAILQ_INIT(&asoc->asconf_send_queue);
1219 	TAILQ_INIT(&asoc->send_queue);
1220 	TAILQ_INIT(&asoc->sent_queue);
1221 	TAILQ_INIT(&asoc->resetHead);
1222 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1223 	TAILQ_INIT(&asoc->asconf_queue);
1224 	/* authentication fields */
1225 	asoc->authinfo.random = NULL;
1226 	asoc->authinfo.active_keyid = 0;
1227 	asoc->authinfo.assoc_key = NULL;
1228 	asoc->authinfo.assoc_keyid = 0;
1229 	asoc->authinfo.recv_key = NULL;
1230 	asoc->authinfo.recv_keyid = 0;
1231 	LIST_INIT(&asoc->shared_keys);
1232 	asoc->marked_retrans = 0;
1233 	asoc->port = inp->sctp_ep.port;
1234 	asoc->timoinit = 0;
1235 	asoc->timodata = 0;
1236 	asoc->timosack = 0;
1237 	asoc->timoshutdown = 0;
1238 	asoc->timoheartbeat = 0;
1239 	asoc->timocookie = 0;
1240 	asoc->timoshutdownack = 0;
1241 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1242 	asoc->discontinuity_time = asoc->start_time;
1243 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1244 		asoc->abandoned_unsent[i] = 0;
1245 		asoc->abandoned_sent[i] = 0;
1246 	}
1247 	/*
1248 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1249 	 * freed later when the association is freed.
1250 	 */
1251 	return (0);
1252 }
1253 
1254 void
1255 sctp_print_mapping_array(struct sctp_association *asoc)
1256 {
1257 	unsigned int i, limit;
1258 
1259 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1260 	    asoc->mapping_array_size,
1261 	    asoc->mapping_array_base_tsn,
1262 	    asoc->cumulative_tsn,
1263 	    asoc->highest_tsn_inside_map,
1264 	    asoc->highest_tsn_inside_nr_map);
1265 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1266 		if (asoc->mapping_array[limit - 1] != 0) {
1267 			break;
1268 		}
1269 	}
1270 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1271 	for (i = 0; i < limit; i++) {
1272 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1273 	}
1274 	if (limit % 16)
1275 		SCTP_PRINTF("\n");
1276 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1277 		if (asoc->nr_mapping_array[limit - 1]) {
1278 			break;
1279 		}
1280 	}
1281 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1282 	for (i = 0; i < limit; i++) {
1283 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1284 	}
1285 	if (limit % 16)
1286 		SCTP_PRINTF("\n");
1287 }
1288 
1289 int
1290 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1291 {
1292 	/* mapping array needs to grow */
1293 	uint8_t *new_array1, *new_array2;
1294 	uint32_t new_size;
1295 
1296 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1297 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1298 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1299 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1300 		/* can't get more, forget it */
1301 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1302 		if (new_array1) {
1303 			SCTP_FREE(new_array1, SCTP_M_MAP);
1304 		}
1305 		if (new_array2) {
1306 			SCTP_FREE(new_array2, SCTP_M_MAP);
1307 		}
1308 		return (-1);
1309 	}
1310 	memset(new_array1, 0, new_size);
1311 	memset(new_array2, 0, new_size);
1312 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1313 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1314 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1315 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1316 	asoc->mapping_array = new_array1;
1317 	asoc->nr_mapping_array = new_array2;
1318 	asoc->mapping_array_size = new_size;
1319 	return (0);
1320 }
1321 
1322 
/*
 * Core of the association iterator: walk the endpoint (inp) list and,
 * for each endpoint matching the iterator's pcb_flags/pcb_features,
 * walk its associations, invoking the iterator's callbacks
 * (function_inp per endpoint, function_assoc per association,
 * function_inp_end when an endpoint is finished, function_atend when
 * the whole iteration completes).  The iterator structure "it" is
 * freed here when the iteration finishes.
 *
 * Locking: runs under SCTP_INP_INFO_RLOCK and SCTP_ITERATOR_LOCK,
 * taking SCTP_INP_RLOCK / SCTP_TCB_LOCK as it descends.  Every
 * SCTP_ITERATOR_MAX_AT_ONCE associations the locks are dropped and
 * re-acquired to let other threads make progress; at that point
 * sctp_it_ctl.iterator_flags is consulted in case the iteration (or
 * the current endpoint) was asked to stop while unlocked.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		/* Drop the reference taken when the iterator was queued. */
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	/* On the first pass it->inp is already read-locked from above. */
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Endpoint callback asked to skip, or no associations. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/* Hold refs so inp/stcb survive while unlocked. */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Re-acquire locks and drop the temporary refs. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}

		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1473 
1474 void
1475 sctp_iterator_worker(void)
1476 {
1477 	struct sctp_iterator *it;
1478 
1479 	/* This function is called with the WQ lock in place */
1480 	sctp_it_ctl.iterator_running = 1;
1481 	while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
1482 		/* now lets work on this one */
1483 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1484 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1485 		CURVNET_SET(it->vn);
1486 		sctp_iterator_work(it);
1487 		CURVNET_RESTORE();
1488 		SCTP_IPI_ITERATOR_WQ_LOCK();
1489 		/* sa_ignore FREED_MEMORY */
1490 	}
1491 	sctp_it_ctl.iterator_running = 0;
1492 	return;
1493 }
1494 
1495 
1496 static void
1497 sctp_handle_addr_wq(void)
1498 {
1499 	/* deal with the ADDR wq from the rtsock calls */
1500 	struct sctp_laddr *wi, *nwi;
1501 	struct sctp_asconf_iterator *asc;
1502 
1503 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1504 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1505 	if (asc == NULL) {
1506 		/* Try later, no memory */
1507 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1508 		    (struct sctp_inpcb *)NULL,
1509 		    (struct sctp_tcb *)NULL,
1510 		    (struct sctp_nets *)NULL);
1511 		return;
1512 	}
1513 	LIST_INIT(&asc->list_of_work);
1514 	asc->cnt = 0;
1515 
1516 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1517 		LIST_REMOVE(wi, sctp_nxt_addr);
1518 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1519 		asc->cnt++;
1520 	}
1521 
1522 	if (asc->cnt == 0) {
1523 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1524 	} else {
1525 		int ret;
1526 
1527 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1528 		    sctp_asconf_iterator_stcb,
1529 		    NULL,	/* No ep end for boundall */
1530 		    SCTP_PCB_FLAGS_BOUNDALL,
1531 		    SCTP_PCB_ANY_FEATURES,
1532 		    SCTP_ASOC_ANY_STATE,
1533 		    (void *)asc, 0,
1534 		    sctp_asconf_iterator_end, NULL, 0);
1535 		if (ret) {
1536 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1537 			/*
1538 			 * Freeing if we are stopping or put back on the
1539 			 * addr_wq.
1540 			 */
1541 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1542 				sctp_asconf_iterator_end(asc, 0);
1543 			} else {
1544 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1545 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1546 				}
1547 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1548 			}
1549 		}
1550 	}
1551 }
1552 
/*
 * Callout handler for every SCTP timer type.  "t" is the struct
 * sctp_timer embedded in the endpoint/net; it carries the (possibly
 * NULL) inp, stcb and net the timer applies to, plus its type.
 *
 * The function first validates the timer (stale pointer, invalid
 * type, missing inp/stcb, inactive or rescheduled callout), taking an
 * inp reference and an stcb refcount while checking, then acquires
 * the appropriate lock (TCB lock, INP write lock, or the address-WQ
 * lock when no inp is involved) and dispatches on the timer type.
 * The tmr->stopped_from field is updated at each stage as a debugging
 * breadcrumb.  Several handlers may free the stcb (or inp) entirely,
 * in which case they jump to out_decr/out_no_decr to skip the normal
 * unlock path.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif
	int did_output;
	int type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t)tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Only the ADDR_WQ timer may run without an endpoint. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * A socket-less endpoint is only acceptable for timer
		 * types that can legitimately outlive the socket.
		 */
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the stcb while we validate it. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	type = tmr->type;
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* Timer was stopped while this callout was pending. */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	/* Take the lock appropriate to what the timer refers to. */
	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	} else if (inp != NULL) {
		if (type != SCTP_TIMER_TYPE_INPKILL) {
			SCTP_INP_WLOCK(inp);
		}
	} else {
		SCTP_WQ_ADDR_LOCK();
	}
	/* record in stopped what t-o occurred */
	tmr->stopped_from = type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (type) {
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/* The only type that also requires a valid net. */
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}

		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* Rotate the endpoint's cookie secret keys. */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* Shutdown took too long: abort the association. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Shutdown guard timer expired");
		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* Free the association itself. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		/* Free the endpoint itself. */
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t)type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	/* Release whichever lock was taken above. */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	} else if (inp != NULL) {
		SCTP_INP_WUNLOCK(inp);
	} else {
		SCTP_WQ_ADDR_UNLOCK();
	}

out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}

out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
	CURVNET_RESTORE();
}
1990 
1991 void
1992 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1993     struct sctp_nets *net)
1994 {
1995 	uint32_t to_ticks;
1996 	struct sctp_timer *tmr;
1997 
1998 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1999 		return;
2000 
2001 	tmr = NULL;
2002 	if (stcb) {
2003 		SCTP_TCB_LOCK_ASSERT(stcb);
2004 	}
2005 	switch (t_type) {
2006 	case SCTP_TIMER_TYPE_ADDR_WQ:
2007 		/* Only 1 tick away :-) */
2008 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2009 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
2010 		break;
2011 	case SCTP_TIMER_TYPE_SEND:
2012 		/* Here we use the RTO timer */
2013 		{
2014 			int rto_val;
2015 
2016 			if ((stcb == NULL) || (net == NULL)) {
2017 				return;
2018 			}
2019 			tmr = &net->rxt_timer;
2020 			if (net->RTO == 0) {
2021 				rto_val = stcb->asoc.initial_rto;
2022 			} else {
2023 				rto_val = net->RTO;
2024 			}
2025 			to_ticks = MSEC_TO_TICKS(rto_val);
2026 		}
2027 		break;
2028 	case SCTP_TIMER_TYPE_INIT:
2029 		/*
2030 		 * Here we use the INIT timer default usually about 1
2031 		 * minute.
2032 		 */
2033 		if ((stcb == NULL) || (net == NULL)) {
2034 			return;
2035 		}
2036 		tmr = &net->rxt_timer;
2037 		if (net->RTO == 0) {
2038 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2039 		} else {
2040 			to_ticks = MSEC_TO_TICKS(net->RTO);
2041 		}
2042 		break;
2043 	case SCTP_TIMER_TYPE_RECV:
2044 		/*
2045 		 * Here we use the Delayed-Ack timer value from the inp
2046 		 * ususually about 200ms.
2047 		 */
2048 		if (stcb == NULL) {
2049 			return;
2050 		}
2051 		tmr = &stcb->asoc.dack_timer;
2052 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2053 		break;
2054 	case SCTP_TIMER_TYPE_SHUTDOWN:
2055 		/* Here we use the RTO of the destination. */
2056 		if ((stcb == NULL) || (net == NULL)) {
2057 			return;
2058 		}
2059 		if (net->RTO == 0) {
2060 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2061 		} else {
2062 			to_ticks = MSEC_TO_TICKS(net->RTO);
2063 		}
2064 		tmr = &net->rxt_timer;
2065 		break;
2066 	case SCTP_TIMER_TYPE_HEARTBEAT:
2067 		/*
2068 		 * the net is used here so that we can add in the RTO. Even
2069 		 * though we use a different timer. We also add the HB timer
2070 		 * PLUS a random jitter.
2071 		 */
2072 		if ((stcb == NULL) || (net == NULL)) {
2073 			return;
2074 		} else {
2075 			uint32_t rndval;
2076 			uint32_t jitter;
2077 
2078 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2079 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2080 				return;
2081 			}
2082 			if (net->RTO == 0) {
2083 				to_ticks = stcb->asoc.initial_rto;
2084 			} else {
2085 				to_ticks = net->RTO;
2086 			}
2087 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2088 			jitter = rndval % to_ticks;
2089 			if (jitter >= (to_ticks >> 1)) {
2090 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2091 			} else {
2092 				to_ticks = to_ticks - jitter;
2093 			}
2094 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2095 			    !(net->dest_state & SCTP_ADDR_PF)) {
2096 				to_ticks += net->heart_beat_delay;
2097 			}
2098 			/*
2099 			 * Now we must convert the to_ticks that are now in
2100 			 * ms to ticks.
2101 			 */
2102 			to_ticks = MSEC_TO_TICKS(to_ticks);
2103 			tmr = &net->hb_timer;
2104 		}
2105 		break;
2106 	case SCTP_TIMER_TYPE_COOKIE:
2107 		/*
2108 		 * Here we can use the RTO timer from the network since one
2109 		 * RTT was compelete. If a retran happened then we will be
2110 		 * using the RTO initial value.
2111 		 */
2112 		if ((stcb == NULL) || (net == NULL)) {
2113 			return;
2114 		}
2115 		if (net->RTO == 0) {
2116 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2117 		} else {
2118 			to_ticks = MSEC_TO_TICKS(net->RTO);
2119 		}
2120 		tmr = &net->rxt_timer;
2121 		break;
2122 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2123 		/*
2124 		 * nothing needed but the endpoint here ususually about 60
2125 		 * minutes.
2126 		 */
2127 		tmr = &inp->sctp_ep.signature_change;
2128 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2129 		break;
2130 	case SCTP_TIMER_TYPE_ASOCKILL:
2131 		if (stcb == NULL) {
2132 			return;
2133 		}
2134 		tmr = &stcb->asoc.strreset_timer;
2135 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2136 		break;
2137 	case SCTP_TIMER_TYPE_INPKILL:
2138 		/*
2139 		 * The inp is setup to die. We re-use the signature_chage
2140 		 * timer since that has stopped and we are in the GONE
2141 		 * state.
2142 		 */
2143 		tmr = &inp->sctp_ep.signature_change;
2144 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2145 		break;
2146 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2147 		/*
2148 		 * Here we use the value found in the EP for PMTU ususually
2149 		 * about 10 minutes.
2150 		 */
2151 		if ((stcb == NULL) || (net == NULL)) {
2152 			return;
2153 		}
2154 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2155 			return;
2156 		}
2157 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2158 		tmr = &net->pmtu_timer;
2159 		break;
2160 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2161 		/* Here we use the RTO of the destination */
2162 		if ((stcb == NULL) || (net == NULL)) {
2163 			return;
2164 		}
2165 		if (net->RTO == 0) {
2166 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2167 		} else {
2168 			to_ticks = MSEC_TO_TICKS(net->RTO);
2169 		}
2170 		tmr = &net->rxt_timer;
2171 		break;
2172 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2173 		/*
2174 		 * Here we use the endpoints shutdown guard timer usually
2175 		 * about 3 minutes.
2176 		 */
2177 		if (stcb == NULL) {
2178 			return;
2179 		}
2180 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2181 			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2182 		} else {
2183 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2184 		}
2185 		tmr = &stcb->asoc.shut_guard_timer;
2186 		break;
2187 	case SCTP_TIMER_TYPE_STRRESET:
2188 		/*
2189 		 * Here the timer comes from the stcb but its value is from
2190 		 * the net's RTO.
2191 		 */
2192 		if ((stcb == NULL) || (net == NULL)) {
2193 			return;
2194 		}
2195 		if (net->RTO == 0) {
2196 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2197 		} else {
2198 			to_ticks = MSEC_TO_TICKS(net->RTO);
2199 		}
2200 		tmr = &stcb->asoc.strreset_timer;
2201 		break;
2202 	case SCTP_TIMER_TYPE_ASCONF:
2203 		/*
2204 		 * Here the timer comes from the stcb but its value is from
2205 		 * the net's RTO.
2206 		 */
2207 		if ((stcb == NULL) || (net == NULL)) {
2208 			return;
2209 		}
2210 		if (net->RTO == 0) {
2211 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2212 		} else {
2213 			to_ticks = MSEC_TO_TICKS(net->RTO);
2214 		}
2215 		tmr = &stcb->asoc.asconf_timer;
2216 		break;
2217 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2218 		if ((stcb == NULL) || (net != NULL)) {
2219 			return;
2220 		}
2221 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2222 		tmr = &stcb->asoc.delete_prim_timer;
2223 		break;
2224 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2225 		if (stcb == NULL) {
2226 			return;
2227 		}
2228 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2229 			/*
2230 			 * Really an error since stcb is NOT set to
2231 			 * autoclose
2232 			 */
2233 			return;
2234 		}
2235 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2236 		tmr = &stcb->asoc.autoclose_timer;
2237 		break;
2238 	default:
2239 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2240 		    __func__, t_type);
2241 		return;
2242 		break;
2243 	}
2244 	if ((to_ticks <= 0) || (tmr == NULL)) {
2245 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2246 		    __func__, t_type, to_ticks, (void *)tmr);
2247 		return;
2248 	}
2249 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2250 		/*
2251 		 * we do NOT allow you to have it already running. if it is
2252 		 * we leave the current one up unchanged
2253 		 */
2254 		return;
2255 	}
2256 	/* At this point we can proceed */
2257 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2258 		stcb->asoc.num_send_timers_up++;
2259 	}
2260 	tmr->stopped_from = 0;
2261 	tmr->type = t_type;
2262 	tmr->ep = (void *)inp;
2263 	tmr->tcb = (void *)stcb;
2264 	tmr->net = (void *)net;
2265 	tmr->self = (void *)tmr;
2266 	tmr->vnet = (void *)curvnet;
2267 	tmr->ticks = sctp_get_tick_count();
2268 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2269 	return;
2270 }
2271 
2272 void
2273 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2274     struct sctp_nets *net, uint32_t from)
2275 {
2276 	struct sctp_timer *tmr;
2277 
2278 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2279 	    (inp == NULL))
2280 		return;
2281 
2282 	tmr = NULL;
2283 	if (stcb) {
2284 		SCTP_TCB_LOCK_ASSERT(stcb);
2285 	}
2286 	switch (t_type) {
2287 	case SCTP_TIMER_TYPE_ADDR_WQ:
2288 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2289 		break;
2290 	case SCTP_TIMER_TYPE_SEND:
2291 		if ((stcb == NULL) || (net == NULL)) {
2292 			return;
2293 		}
2294 		tmr = &net->rxt_timer;
2295 		break;
2296 	case SCTP_TIMER_TYPE_INIT:
2297 		if ((stcb == NULL) || (net == NULL)) {
2298 			return;
2299 		}
2300 		tmr = &net->rxt_timer;
2301 		break;
2302 	case SCTP_TIMER_TYPE_RECV:
2303 		if (stcb == NULL) {
2304 			return;
2305 		}
2306 		tmr = &stcb->asoc.dack_timer;
2307 		break;
2308 	case SCTP_TIMER_TYPE_SHUTDOWN:
2309 		if ((stcb == NULL) || (net == NULL)) {
2310 			return;
2311 		}
2312 		tmr = &net->rxt_timer;
2313 		break;
2314 	case SCTP_TIMER_TYPE_HEARTBEAT:
2315 		if ((stcb == NULL) || (net == NULL)) {
2316 			return;
2317 		}
2318 		tmr = &net->hb_timer;
2319 		break;
2320 	case SCTP_TIMER_TYPE_COOKIE:
2321 		if ((stcb == NULL) || (net == NULL)) {
2322 			return;
2323 		}
2324 		tmr = &net->rxt_timer;
2325 		break;
2326 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2327 		/* nothing needed but the endpoint here */
2328 		tmr = &inp->sctp_ep.signature_change;
2329 		/*
2330 		 * We re-use the newcookie timer for the INP kill timer. We
2331 		 * must assure that we do not kill it by accident.
2332 		 */
2333 		break;
2334 	case SCTP_TIMER_TYPE_ASOCKILL:
2335 		/*
2336 		 * Stop the asoc kill timer.
2337 		 */
2338 		if (stcb == NULL) {
2339 			return;
2340 		}
2341 		tmr = &stcb->asoc.strreset_timer;
2342 		break;
2343 
2344 	case SCTP_TIMER_TYPE_INPKILL:
2345 		/*
2346 		 * The inp is setup to die. We re-use the signature_chage
2347 		 * timer since that has stopped and we are in the GONE
2348 		 * state.
2349 		 */
2350 		tmr = &inp->sctp_ep.signature_change;
2351 		break;
2352 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2353 		if ((stcb == NULL) || (net == NULL)) {
2354 			return;
2355 		}
2356 		tmr = &net->pmtu_timer;
2357 		break;
2358 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2359 		if ((stcb == NULL) || (net == NULL)) {
2360 			return;
2361 		}
2362 		tmr = &net->rxt_timer;
2363 		break;
2364 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2365 		if (stcb == NULL) {
2366 			return;
2367 		}
2368 		tmr = &stcb->asoc.shut_guard_timer;
2369 		break;
2370 	case SCTP_TIMER_TYPE_STRRESET:
2371 		if (stcb == NULL) {
2372 			return;
2373 		}
2374 		tmr = &stcb->asoc.strreset_timer;
2375 		break;
2376 	case SCTP_TIMER_TYPE_ASCONF:
2377 		if (stcb == NULL) {
2378 			return;
2379 		}
2380 		tmr = &stcb->asoc.asconf_timer;
2381 		break;
2382 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2383 		if (stcb == NULL) {
2384 			return;
2385 		}
2386 		tmr = &stcb->asoc.delete_prim_timer;
2387 		break;
2388 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2389 		if (stcb == NULL) {
2390 			return;
2391 		}
2392 		tmr = &stcb->asoc.autoclose_timer;
2393 		break;
2394 	default:
2395 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2396 		    __func__, t_type);
2397 		break;
2398 	}
2399 	if (tmr == NULL) {
2400 		return;
2401 	}
2402 	if ((tmr->type != t_type) && tmr->type) {
2403 		/*
2404 		 * Ok we have a timer that is under joint use. Cookie timer
2405 		 * per chance with the SEND timer. We therefore are NOT
2406 		 * running the timer that the caller wants stopped.  So just
2407 		 * return.
2408 		 */
2409 		return;
2410 	}
2411 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2412 		stcb->asoc.num_send_timers_up--;
2413 		if (stcb->asoc.num_send_timers_up < 0) {
2414 			stcb->asoc.num_send_timers_up = 0;
2415 		}
2416 	}
2417 	tmr->self = NULL;
2418 	tmr->stopped_from = from;
2419 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2420 	return;
2421 }
2422 
2423 uint32_t
2424 sctp_calculate_len(struct mbuf *m)
2425 {
2426 	uint32_t tlen = 0;
2427 	struct mbuf *at;
2428 
2429 	at = m;
2430 	while (at) {
2431 		tlen += SCTP_BUF_LEN(at);
2432 		at = SCTP_BUF_NEXT(at);
2433 	}
2434 	return (tlen);
2435 }
2436 
2437 void
2438 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2439     struct sctp_association *asoc, uint32_t mtu)
2440 {
2441 	/*
2442 	 * Reset the P-MTU size on this association, this involves changing
2443 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2444 	 * allow the DF flag to be cleared.
2445 	 */
2446 	struct sctp_tmit_chunk *chk;
2447 	unsigned int eff_mtu, ovh;
2448 
2449 	asoc->smallest_mtu = mtu;
2450 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2451 		ovh = SCTP_MIN_OVERHEAD;
2452 	} else {
2453 		ovh = SCTP_MIN_V4_OVERHEAD;
2454 	}
2455 	eff_mtu = mtu - ovh;
2456 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2457 		if (chk->send_size > eff_mtu) {
2458 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2459 		}
2460 	}
2461 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2462 		if (chk->send_size > eff_mtu) {
2463 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2464 		}
2465 	}
2466 }
2467 
2468 
2469 /*
2470  * Given an association and starting time of the current RTT period, update
2471  * RTO in number of msecs. net should point to the current network.
2472  * Return 1, if an RTO update was performed, return 0 if no update was
2473  * performed due to invalid starting point.
2474  */
2475 
2476 int
2477 sctp_calculate_rto(struct sctp_tcb *stcb,
2478     struct sctp_association *asoc,
2479     struct sctp_nets *net,
2480     struct timeval *old,
2481     int rtt_from_sack)
2482 {
2483 	struct timeval now;
2484 	uint64_t rtt_us;	/* RTT in us */
2485 	int32_t rtt;		/* RTT in ms */
2486 	uint32_t new_rto;
2487 	int first_measure = 0;
2488 
2489 	/************************/
2490 	/* 1. calculate new RTT */
2491 	/************************/
2492 	/* get the current time */
2493 	if (stcb->asoc.use_precise_time) {
2494 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2495 	} else {
2496 		(void)SCTP_GETTIME_TIMEVAL(&now);
2497 	}
2498 	if ((old->tv_sec > now.tv_sec) ||
2499 	    ((old->tv_sec == now.tv_sec) && (old->tv_sec > now.tv_sec))) {
2500 		/* The starting point is in the future. */
2501 		return (0);
2502 	}
2503 	timevalsub(&now, old);
2504 	rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec;
2505 	if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) {
2506 		/* The RTT is larger than a sane value. */
2507 		return (0);
2508 	}
2509 	/* store the current RTT in us */
2510 	net->rtt = rtt_us;
2511 	/* compute rtt in ms */
2512 	rtt = (int32_t)(net->rtt / 1000);
2513 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2514 		/*
2515 		 * Tell the CC module that a new update has just occurred
2516 		 * from a sack
2517 		 */
2518 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2519 	}
2520 	/*
2521 	 * Do we need to determine the lan? We do this only on sacks i.e.
2522 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2523 	 */
2524 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2525 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2526 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2527 			net->lan_type = SCTP_LAN_INTERNET;
2528 		} else {
2529 			net->lan_type = SCTP_LAN_LOCAL;
2530 		}
2531 	}
2532 
2533 	/***************************/
2534 	/* 2. update RTTVAR & SRTT */
2535 	/***************************/
2536 	/*-
2537 	 * Compute the scaled average lastsa and the
2538 	 * scaled variance lastsv as described in van Jacobson
2539 	 * Paper "Congestion Avoidance and Control", Annex A.
2540 	 *
2541 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2542 	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
2543 	 */
2544 	if (net->RTO_measured) {
2545 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2546 		net->lastsa += rtt;
2547 		if (rtt < 0) {
2548 			rtt = -rtt;
2549 		}
2550 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2551 		net->lastsv += rtt;
2552 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2553 			rto_logging(net, SCTP_LOG_RTTVAR);
2554 		}
2555 	} else {
2556 		/* First RTO measurment */
2557 		net->RTO_measured = 1;
2558 		first_measure = 1;
2559 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2560 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2561 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2562 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2563 		}
2564 	}
2565 	if (net->lastsv == 0) {
2566 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2567 	}
2568 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2569 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2570 	    (stcb->asoc.sat_network_lockout == 0)) {
2571 		stcb->asoc.sat_network = 1;
2572 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2573 		stcb->asoc.sat_network = 0;
2574 		stcb->asoc.sat_network_lockout = 1;
2575 	}
2576 	/* bound it, per C6/C7 in Section 5.3.1 */
2577 	if (new_rto < stcb->asoc.minrto) {
2578 		new_rto = stcb->asoc.minrto;
2579 	}
2580 	if (new_rto > stcb->asoc.maxrto) {
2581 		new_rto = stcb->asoc.maxrto;
2582 	}
2583 	net->RTO = new_rto;
2584 	return (1);
2585 }
2586 
2587 /*
2588  * return a pointer to a contiguous piece of data from the given mbuf chain
2589  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2590  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2591  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2592  */
2593 caddr_t
2594 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
2595 {
2596 	uint32_t count;
2597 	uint8_t *ptr;
2598 
2599 	ptr = in_ptr;
2600 	if ((off < 0) || (len <= 0))
2601 		return (NULL);
2602 
2603 	/* find the desired start location */
2604 	while ((m != NULL) && (off > 0)) {
2605 		if (off < SCTP_BUF_LEN(m))
2606 			break;
2607 		off -= SCTP_BUF_LEN(m);
2608 		m = SCTP_BUF_NEXT(m);
2609 	}
2610 	if (m == NULL)
2611 		return (NULL);
2612 
2613 	/* is the current mbuf large enough (eg. contiguous)? */
2614 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2615 		return (mtod(m, caddr_t)+off);
2616 	} else {
2617 		/* else, it spans more than one mbuf, so save a temp copy... */
2618 		while ((m != NULL) && (len > 0)) {
2619 			count = min(SCTP_BUF_LEN(m) - off, len);
2620 			memcpy(ptr, mtod(m, caddr_t)+off, count);
2621 			len -= count;
2622 			ptr += count;
2623 			off = 0;
2624 			m = SCTP_BUF_NEXT(m);
2625 		}
2626 		if ((m == NULL) && (len > 0))
2627 			return (NULL);
2628 		else
2629 			return ((caddr_t)in_ptr);
2630 	}
2631 }
2632 
2633 
2634 
2635 struct sctp_paramhdr *
2636 sctp_get_next_param(struct mbuf *m,
2637     int offset,
2638     struct sctp_paramhdr *pull,
2639     int pull_limit)
2640 {
2641 	/* This just provides a typed signature to Peter's Pull routine */
2642 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2643 	    (uint8_t *)pull));
2644 }
2645 
2646 
2647 struct mbuf *
2648 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2649 {
2650 	struct mbuf *m_last;
2651 	caddr_t dp;
2652 
2653 	if (padlen > 3) {
2654 		return (NULL);
2655 	}
2656 	if (padlen <= M_TRAILINGSPACE(m)) {
2657 		/*
2658 		 * The easy way. We hope the majority of the time we hit
2659 		 * here :)
2660 		 */
2661 		m_last = m;
2662 	} else {
2663 		/* Hard way we must grow the mbuf chain */
2664 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2665 		if (m_last == NULL) {
2666 			return (NULL);
2667 		}
2668 		SCTP_BUF_LEN(m_last) = 0;
2669 		SCTP_BUF_NEXT(m_last) = NULL;
2670 		SCTP_BUF_NEXT(m) = m_last;
2671 	}
2672 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2673 	SCTP_BUF_LEN(m_last) += padlen;
2674 	memset(dp, 0, padlen);
2675 	return (m_last);
2676 }
2677 
2678 struct mbuf *
2679 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2680 {
2681 	/* find the last mbuf in chain and pad it */
2682 	struct mbuf *m_at;
2683 
2684 	if (last_mbuf != NULL) {
2685 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2686 	} else {
2687 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2688 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2689 				return (sctp_add_pad_tombuf(m_at, padval));
2690 			}
2691 		}
2692 	}
2693 	return (NULL);
2694 }
2695 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification for the given association
 * state transition, if the application subscribed to association
 * events.  For COMM_UP/RESTART a list of supported features is
 * appended in sac_info; for COMM_LOST/CANT_STR_ASSOC the received
 * ABORT chunk (if any, truncated to SCTP_CHUNK_BUFFER_SIZE) is
 * appended instead.  For 1-to-1 style sockets an appropriate socket
 * error is also set and any sleepers on the socket are woken up.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t abort_len;
	unsigned int i;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	if (stcb == NULL) {
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
			/*
			 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
			 * contiguous.
			 */
			if (abort_len > SCTP_CHUNK_BUFFER_SIZE) {
				abort_len = SCTP_CHUNK_BUFFER_SIZE;
			}
		} else {
			abort_len = 0;
		}
		/* Reserve room for the variable-length sac_info portion. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		/* Only fill sac_info if the full-size mbuf was obtained. */
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				/* One feature code per byte of sac_info. */
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				if (stcb->asoc.idata_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				/* Hand the (truncated) ABORT chunk to the user. */
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			control->spec_flags = M_NOTIFICATION;
			/* not that we need this */
			control->tail_mbuf = m_notify;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			/* No readq entry: drop the notification. */
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up and error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			/* Peer aborted: refused during setup, reset after. */
			if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			/* Local abort: timed out during setup, aborted after. */
			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
		SOCK_UNLOCK(stcb->sctp_socket);
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		/*
		 * Acquire the socket lock in the required order (socket
		 * before TCB), holding a refcount so the stcb survives.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
2856 
/*
 * Deliver an SCTP_PEER_ADDR_CHANGE notification (spc_state/spc_error)
 * for peer address 'sa' to the application, if it subscribed to peer
 * address events.  The address is copied into the notification with
 * IPv4-mapped conversion or IPv6 scope handling applied as needed, and
 * the result is queued on the socket's receive buffer.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address, normalizing its presentation for the user. */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		/* Present as a v4-mapped v6 address if the app wants that. */
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2948 
2949 
/*
 * Queue a SCTP_SEND_FAILED / SCTP_SEND_FAILED_EVENT notification for a
 * single chunk that either was (sent != 0) or was not (sent == 0) put on
 * the wire.  The chunk's data mbufs are stolen from 'chk' and chained
 * behind the notification header so the user gets the undelivered
 * payload back, with the SCTP chunk header and padding trimmed off when
 * the header is self-consistent.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	struct sctp_chunkhdr *chkhdr;
	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}

	/* The extended (RFC 6458) event uses a different header size. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	/* I-DATA chunks carry a larger chunk header than plain DATA chunks. */
	if (stcb->asoc.idata_supported) {
		chkhdr_len = sizeof(struct sctp_idata_chunk);
	} else {
		chkhdr_len = sizeof(struct sctp_data_chunk);
	}
	/* Use some defaults in case we can't access the chunk header */
	if (chk->send_size >= chkhdr_len) {
		payload_len = chk->send_size - chkhdr_len;
	} else {
		payload_len = 0;
	}
	padding_len = 0;
	if (chk->data != NULL) {
		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
		if (chkhdr != NULL) {
			chk_len = ntohs(chkhdr->chunk_length);
			/*
			 * Only trust the on-the-wire length if it is
			 * consistent with send_size, allowing for at most
			 * 3 bytes of chunk padding.
			 */
			if ((chk_len >= chkhdr_len) &&
			    (chk->send_size >= chk_len) &&
			    (chk->send_size - chk_len < 4)) {
				padding_len = chk->send_size - chk_len;
				payload_len = chk->send_size - chkhdr_len - padding_len;
			}
		}
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	if (chk->data != NULL) {
		/* Trim off the sctp chunk header (it should be there) */
		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
			m_adj(chk->data, chkhdr_len);
			/* negative count trims from the tail of the chain */
			m_adj(chk->data, -padding_len);
			sctp_mbuf_crush(chk->data);
			chk->send_size -= (chkhdr_len + padding_len);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3084 
3085 
3086 static void
3087 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3088     struct sctp_stream_queue_pending *sp, int so_locked
3089 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3090     SCTP_UNUSED
3091 #endif
3092 )
3093 {
3094 	struct mbuf *m_notify;
3095 	struct sctp_send_failed *ssf;
3096 	struct sctp_send_failed_event *ssfe;
3097 	struct sctp_queued_to_read *control;
3098 	int notifhdr_len;
3099 
3100 	if ((stcb == NULL) ||
3101 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3102 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3103 		/* event not enabled */
3104 		return;
3105 	}
3106 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3107 		notifhdr_len = sizeof(struct sctp_send_failed_event);
3108 	} else {
3109 		notifhdr_len = sizeof(struct sctp_send_failed);
3110 	}
3111 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3112 	if (m_notify == NULL) {
3113 		/* no space left */
3114 		return;
3115 	}
3116 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3117 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3118 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3119 		memset(ssfe, 0, notifhdr_len);
3120 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3121 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3122 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
3123 		ssfe->ssfe_error = error;
3124 		/* not exactly what the user sent in, but should be close :) */
3125 		ssfe->ssfe_info.snd_sid = sp->sid;
3126 		if (sp->some_taken) {
3127 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3128 		} else {
3129 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3130 		}
3131 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3132 		ssfe->ssfe_info.snd_context = sp->context;
3133 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3134 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3135 	} else {
3136 		ssf = mtod(m_notify, struct sctp_send_failed *);
3137 		memset(ssf, 0, notifhdr_len);
3138 		ssf->ssf_type = SCTP_SEND_FAILED;
3139 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3140 		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
3141 		ssf->ssf_error = error;
3142 		/* not exactly what the user sent in, but should be close :) */
3143 		ssf->ssf_info.sinfo_stream = sp->sid;
3144 		ssf->ssf_info.sinfo_ssn = 0;
3145 		if (sp->some_taken) {
3146 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3147 		} else {
3148 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3149 		}
3150 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3151 		ssf->ssf_info.sinfo_context = sp->context;
3152 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3153 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3154 	}
3155 	SCTP_BUF_NEXT(m_notify) = sp->data;
3156 
3157 	/* Steal off the mbuf */
3158 	sp->data = NULL;
3159 	/*
3160 	 * For this case, we check the actual socket buffer, since the assoc
3161 	 * is going away we don't want to overfill the socket buffer for a
3162 	 * non-reader
3163 	 */
3164 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3165 		sctp_m_freem(m_notify);
3166 		return;
3167 	}
3168 	/* append to socket */
3169 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3170 	    0, 0, stcb->asoc.context, 0, 0, 0,
3171 	    m_notify);
3172 	if (control == NULL) {
3173 		/* no memory */
3174 		sctp_m_freem(m_notify);
3175 		return;
3176 	}
3177 	control->length = SCTP_BUF_LEN(m_notify);
3178 	control->spec_flags = M_NOTIFICATION;
3179 	/* not that we need this */
3180 	control->tail_mbuf = m_notify;
3181 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3182 	    control,
3183 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3184 }
3185 
3186 
3187 
3188 static void
3189 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3190 {
3191 	struct mbuf *m_notify;
3192 	struct sctp_adaptation_event *sai;
3193 	struct sctp_queued_to_read *control;
3194 
3195 	if ((stcb == NULL) ||
3196 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3197 		/* event not enabled */
3198 		return;
3199 	}
3200 
3201 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3202 	if (m_notify == NULL)
3203 		/* no space left */
3204 		return;
3205 	SCTP_BUF_LEN(m_notify) = 0;
3206 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3207 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3208 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3209 	sai->sai_flags = 0;
3210 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3211 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3212 	sai->sai_assoc_id = sctp_get_associd(stcb);
3213 
3214 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3215 	SCTP_BUF_NEXT(m_notify) = NULL;
3216 
3217 	/* append to socket */
3218 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3219 	    0, 0, stcb->asoc.context, 0, 0, 0,
3220 	    m_notify);
3221 	if (control == NULL) {
3222 		/* no memory */
3223 		sctp_m_freem(m_notify);
3224 		return;
3225 	}
3226 	control->length = SCTP_BUF_LEN(m_notify);
3227 	control->spec_flags = M_NOTIFICATION;
3228 	/* not that we need this */
3229 	control->tail_mbuf = m_notify;
3230 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3231 	    control,
3232 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3233 }
3234 
/*
 * Deliver an SCTP_PARTIAL_DELIVERY_EVENT notification.
 *
 * This always must be called with the read-queue LOCKED in the INP,
 * because the entry is inserted into the read queue by hand (not via
 * sctp_add_to_readq) so it can be placed directly after the
 * partial-delivery entry it refers to.
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		return;
	}

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* 'val' packs the stream id in the upper 16 bits, the seq in the lower. */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	/* Account for the notification in the socket buffer by hand. */
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	control->end_added = 1;
	/* Insert right after the in-progress partial-delivery entry if any. */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* Take the socket lock, dropping the TCB lock while
			 * holding a refcount so the assoc can't go away. */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3328 
/*
 * Deliver an SCTP_SHUTDOWN_EVENT notification.  For one-to-one style
 * (and connected one-to-many) sockets the socket is additionally marked
 * as unable to send before the event is queued.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/* Acquire the socket lock with a refcount held on the assoc. */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3398 
3399 static void
3400 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3401     int so_locked
3402 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3403     SCTP_UNUSED
3404 #endif
3405 )
3406 {
3407 	struct mbuf *m_notify;
3408 	struct sctp_sender_dry_event *event;
3409 	struct sctp_queued_to_read *control;
3410 
3411 	if ((stcb == NULL) ||
3412 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3413 		/* event not enabled */
3414 		return;
3415 	}
3416 
3417 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3418 	if (m_notify == NULL) {
3419 		/* no space left */
3420 		return;
3421 	}
3422 	SCTP_BUF_LEN(m_notify) = 0;
3423 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3424 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3425 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3426 	event->sender_dry_flags = 0;
3427 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3428 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3429 
3430 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3431 	SCTP_BUF_NEXT(m_notify) = NULL;
3432 
3433 	/* append to socket */
3434 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3435 	    0, 0, stcb->asoc.context, 0, 0, 0,
3436 	    m_notify);
3437 	if (control == NULL) {
3438 		/* no memory */
3439 		sctp_m_freem(m_notify);
3440 		return;
3441 	}
3442 	control->length = SCTP_BUF_LEN(m_notify);
3443 	control->spec_flags = M_NOTIFICATION;
3444 	/* not that we need this */
3445 	control->tail_mbuf = m_notify;
3446 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3447 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3448 }
3449 
3450 
3451 void
3452 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3453 {
3454 	struct mbuf *m_notify;
3455 	struct sctp_queued_to_read *control;
3456 	struct sctp_stream_change_event *stradd;
3457 
3458 	if ((stcb == NULL) ||
3459 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3460 		/* event not enabled */
3461 		return;
3462 	}
3463 	if ((stcb->asoc.peer_req_out) && flag) {
3464 		/* Peer made the request, don't tell the local user */
3465 		stcb->asoc.peer_req_out = 0;
3466 		return;
3467 	}
3468 	stcb->asoc.peer_req_out = 0;
3469 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3470 	if (m_notify == NULL)
3471 		/* no space left */
3472 		return;
3473 	SCTP_BUF_LEN(m_notify) = 0;
3474 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3475 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3476 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3477 	stradd->strchange_flags = flag;
3478 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3479 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3480 	stradd->strchange_instrms = numberin;
3481 	stradd->strchange_outstrms = numberout;
3482 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3483 	SCTP_BUF_NEXT(m_notify) = NULL;
3484 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3485 		/* no space */
3486 		sctp_m_freem(m_notify);
3487 		return;
3488 	}
3489 	/* append to socket */
3490 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3491 	    0, 0, stcb->asoc.context, 0, 0, 0,
3492 	    m_notify);
3493 	if (control == NULL) {
3494 		/* no memory */
3495 		sctp_m_freem(m_notify);
3496 		return;
3497 	}
3498 	control->length = SCTP_BUF_LEN(m_notify);
3499 	control->spec_flags = M_NOTIFICATION;
3500 	/* not that we need this */
3501 	control->tail_mbuf = m_notify;
3502 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3503 	    control,
3504 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3505 }
3506 
3507 void
3508 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3509 {
3510 	struct mbuf *m_notify;
3511 	struct sctp_queued_to_read *control;
3512 	struct sctp_assoc_reset_event *strasoc;
3513 
3514 	if ((stcb == NULL) ||
3515 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3516 		/* event not enabled */
3517 		return;
3518 	}
3519 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3520 	if (m_notify == NULL)
3521 		/* no space left */
3522 		return;
3523 	SCTP_BUF_LEN(m_notify) = 0;
3524 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3525 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3526 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3527 	strasoc->assocreset_flags = flag;
3528 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3529 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3530 	strasoc->assocreset_local_tsn = sending_tsn;
3531 	strasoc->assocreset_remote_tsn = recv_tsn;
3532 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3533 	SCTP_BUF_NEXT(m_notify) = NULL;
3534 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3535 		/* no space */
3536 		sctp_m_freem(m_notify);
3537 		return;
3538 	}
3539 	/* append to socket */
3540 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3541 	    0, 0, stcb->asoc.context, 0, 0, 0,
3542 	    m_notify);
3543 	if (control == NULL) {
3544 		/* no memory */
3545 		sctp_m_freem(m_notify);
3546 		return;
3547 	}
3548 	control->length = SCTP_BUF_LEN(m_notify);
3549 	control->spec_flags = M_NOTIFICATION;
3550 	/* not that we need this */
3551 	control->tail_mbuf = m_notify;
3552 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3553 	    control,
3554 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3555 }
3556 
3557 
3558 
3559 static void
3560 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3561     int number_entries, uint16_t *list, int flag)
3562 {
3563 	struct mbuf *m_notify;
3564 	struct sctp_queued_to_read *control;
3565 	struct sctp_stream_reset_event *strreset;
3566 	int len;
3567 
3568 	if ((stcb == NULL) ||
3569 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3570 		/* event not enabled */
3571 		return;
3572 	}
3573 
3574 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3575 	if (m_notify == NULL)
3576 		/* no space left */
3577 		return;
3578 	SCTP_BUF_LEN(m_notify) = 0;
3579 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3580 	if (len > M_TRAILINGSPACE(m_notify)) {
3581 		/* never enough room */
3582 		sctp_m_freem(m_notify);
3583 		return;
3584 	}
3585 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3586 	memset(strreset, 0, len);
3587 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3588 	strreset->strreset_flags = flag;
3589 	strreset->strreset_length = len;
3590 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3591 	if (number_entries) {
3592 		int i;
3593 
3594 		for (i = 0; i < number_entries; i++) {
3595 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3596 		}
3597 	}
3598 	SCTP_BUF_LEN(m_notify) = len;
3599 	SCTP_BUF_NEXT(m_notify) = NULL;
3600 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3601 		/* no space */
3602 		sctp_m_freem(m_notify);
3603 		return;
3604 	}
3605 	/* append to socket */
3606 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3607 	    0, 0, stcb->asoc.context, 0, 0, 0,
3608 	    m_notify);
3609 	if (control == NULL) {
3610 		/* no memory */
3611 		sctp_m_freem(m_notify);
3612 		return;
3613 	}
3614 	control->length = SCTP_BUF_LEN(m_notify);
3615 	control->spec_flags = M_NOTIFICATION;
3616 	/* not that we need this */
3617 	control->tail_mbuf = m_notify;
3618 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3619 	    control,
3620 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3621 }
3622 
3623 
3624 static void
3625 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3626 {
3627 	struct mbuf *m_notify;
3628 	struct sctp_remote_error *sre;
3629 	struct sctp_queued_to_read *control;
3630 	unsigned int notif_len;
3631 	uint16_t chunk_len;
3632 
3633 	if ((stcb == NULL) ||
3634 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3635 		return;
3636 	}
3637 	if (chunk != NULL) {
3638 		chunk_len = ntohs(chunk->ch.chunk_length);
3639 		/*
3640 		 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
3641 		 * contiguous.
3642 		 */
3643 		if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) {
3644 			chunk_len = SCTP_CHUNK_BUFFER_SIZE;
3645 		}
3646 	} else {
3647 		chunk_len = 0;
3648 	}
3649 	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
3650 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3651 	if (m_notify == NULL) {
3652 		/* Retry with smaller value. */
3653 		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
3654 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3655 		if (m_notify == NULL) {
3656 			return;
3657 		}
3658 	}
3659 	SCTP_BUF_NEXT(m_notify) = NULL;
3660 	sre = mtod(m_notify, struct sctp_remote_error *);
3661 	memset(sre, 0, notif_len);
3662 	sre->sre_type = SCTP_REMOTE_ERROR;
3663 	sre->sre_flags = 0;
3664 	sre->sre_length = sizeof(struct sctp_remote_error);
3665 	sre->sre_error = error;
3666 	sre->sre_assoc_id = sctp_get_associd(stcb);
3667 	if (notif_len > sizeof(struct sctp_remote_error)) {
3668 		memcpy(sre->sre_data, chunk, chunk_len);
3669 		sre->sre_length += chunk_len;
3670 	}
3671 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3672 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3673 	    0, 0, stcb->asoc.context, 0, 0, 0,
3674 	    m_notify);
3675 	if (control != NULL) {
3676 		control->length = SCTP_BUF_LEN(m_notify);
3677 		control->spec_flags = M_NOTIFICATION;
3678 		/* not that we need this */
3679 		control->tail_mbuf = m_notify;
3680 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3681 		    control,
3682 		    &stcb->sctp_socket->so_rcv, 1,
3683 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3684 	} else {
3685 		sctp_m_freem(m_notify);
3686 	}
3687 }
3688 
3689 
/*
 * Central dispatcher for ULP notifications: translate an internal
 * 'notification' code into the matching socket-level event.  'error'
 * and 'data' are interpreted per notification type: e.g. 'data' is a
 * struct sctp_nets * for interface events, a chunk or stream-queue
 * entry for send failures, and a stream-id list for stream resets
 * (where 'error' carries the number of list entries).
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		return;
	}
	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* COMM_UP is only reported once per association. */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;

			/* 'data' points at a packed stream-id/seq word. */
			val = *((uint32_t *)data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		/* In front states an abort means the setup failed. */
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	/*
	 * For the stream-reset notifications below, 'error' carries the
	 * number of entries in the stream list pointed to by 'data'.
	 */
	case SCTP_NOTIFY_STR_RESET_SEND:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	/* For the AUTH notifications, 'data' carries the key number. */
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __func__, notification, notification);
		break;
	}			/* end switch */
}
3869 
/*
 * Flush all outbound data still queued on this association: the sent
 * queue, the pending send queue, and every stream's output queue.  For
 * each chunk still holding data, the ULP is notified of the failed
 * datagram (SENT_DG_FAIL / UNSENT_DG_FAIL / SPECIAL_SP_FAIL) with
 * 'error' before the chunk is freed.  Used on abort/teardown paths.
 * 'holds_lock' indicates whether the caller already owns the TCB send
 * lock; 'so_locked' is passed through to the notification routines.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	/* Nothing to report once the socket is gone or closed. */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		/* caller does not hold the send lock; take it here */
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			/*
			 * NR-acked chunks are skipped here -- presumably
			 * already removed from the per-stream count; confirm
			 * against the NR-SACK processing path.
			 */
			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* re-check: the notify path may have taken the mbuf */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* re-check: the notify path may have taken the mbuf */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			/* detach from the stream scheduler before freeing */
			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				/* re-check: the notify path may have taken the mbuf */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3981 
3982 void
3983 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3984     struct sctp_abort_chunk *abort, int so_locked
3985 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3986     SCTP_UNUSED
3987 #endif
3988 )
3989 {
3990 	if (stcb == NULL) {
3991 		return;
3992 	}
3993 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3994 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3995 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3996 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3997 	}
3998 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3999 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4000 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
4001 		return;
4002 	}
4003 	/* Tell them we lost the asoc */
4004 	sctp_report_all_outbound(stcb, error, 0, so_locked);
4005 	if (from_peer) {
4006 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
4007 	} else {
4008 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
4009 	}
4010 }
4011 
/*
 * Send an ABORT in response to the given packet (m/src/dst/sh) and, if
 * a TCB exists, also notify the ULP, mark the association aborted,
 * update abort statistics, and free the association.  When a TCB is
 * present, its peer vtag and VRF id override the caller-supplied ones.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	vtag = 0;
	if (stcb != NULL) {
		/* Use the association's peer vtag and VRF for the ABORT. */
		vtag = stcb->asoc.peer_vtag;
		vrf_id = stcb->asoc.vrf_id;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    mflowtype, mflowid, inp->fibnum,
	    vrf_id, port);
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock ordering: hold a reference, drop the TCB lock,
		 * acquire the socket lock, then re-take the TCB lock
		 * before freeing the association.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* An established association is going away; drop the gauge. */
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
4058 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the association's inbound and outbound TSN track
 * logs (circular buffers of SCTP_TSN_LOG_SIZE entries; the *_wrapped
 * flag means the buffer has rolled over, so the tail is printed first).
 *
 * NOTE(review): "NOSIY_PRINTS" looks like a typo for "NOISY_PRINTS";
 * as written, the whole body compiles out unless that exact misspelled
 * macro is defined.  Confirm intent before renaming, since builds may
 * rely on the current name.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* buffer wrapped: print the older tail entries first */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		/* buffer wrapped: print the older tail entries first */
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
4119 #endif
4120 
/*
 * Abort an existing association: send an ABORT chunk to the peer
 * (carrying op_err, if any), update statistics, notify the ULP unless
 * the socket is already gone, and free the association.  If stcb is
 * NULL and the socket is gone with no associations left, the PCB
 * itself is freed instead.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			/* last association already gone: release the PCB */
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* An established association is going away; drop the gauge. */
	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock ordering: hold a reference, drop the TCB lock, take the
	 * socket lock, then re-take the TCB lock before freeing.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4181 
/*
 * Handle an "out of the blue" packet, i.e. one that matches no known
 * association.  The chunk list is scanned first: packets containing
 * PACKET_DROPPED, ABORT, or SHUTDOWN-COMPLETE chunks get no response;
 * a SHUTDOWN-ACK is answered with a SHUTDOWN-COMPLETE.  Otherwise an
 * ABORT (with 'cause', if any) is sent, subject to the sctp_blackhole
 * sysctl (1 suppresses the ABORT only for packets containing an INIT,
 * 2 suppresses it entirely).
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    struct mbuf *cause,
    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		/* socket is gone and no associations remain: free the PCB */
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *)&chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			/* remember, so the blackhole sysctl can suppress */
			contains_init_chunk = 1;
			break;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(src, dst, sh,
			    mflowtype, mflowid, fibnum,
			    vrf_id, port);
			return;
		default:
			break;
		}
		/* advance to the next chunk (lengths are 4-byte padded) */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *)&chunk_buf);
	}
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	    (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
		    mflowtype, mflowid, fibnum,
		    vrf_id, port);
	}
}
4247 
4248 /*
4249  * check the inbound datagram to make sure there is not an abort inside it,
4250  * if there is return 1, else return 0.
4251  */
4252 int
4253 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
4254 {
4255 	struct sctp_chunkhdr *ch;
4256 	struct sctp_init_chunk *init_chk, chunk_buf;
4257 	int offset;
4258 	unsigned int chk_length;
4259 
4260 	offset = iphlen + sizeof(struct sctphdr);
4261 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4262 	    (uint8_t *)&chunk_buf);
4263 	while (ch != NULL) {
4264 		chk_length = ntohs(ch->chunk_length);
4265 		if (chk_length < sizeof(*ch)) {
4266 			/* packet is probably corrupt */
4267 			break;
4268 		}
4269 		/* we seem to be ok, is it an abort? */
4270 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4271 			/* yep, tell them */
4272 			return (1);
4273 		}
4274 		if (ch->chunk_type == SCTP_INITIATION) {
4275 			/* need to update the Vtag */
4276 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4277 			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
4278 			if (init_chk != NULL) {
4279 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4280 			}
4281 		}
4282 		/* Nope, move to the next chunk */
4283 		offset += SCTP_SIZE32(chk_length);
4284 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4285 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4286 	}
4287 	return (0);
4288 }
4289 
4290 /*
4291  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4292  * set (i.e. it's 0) so, create this function to compare link local scopes
4293  */
4294 #ifdef INET6
4295 uint32_t
4296 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4297 {
4298 	struct sockaddr_in6 a, b;
4299 
4300 	/* save copies */
4301 	a = *addr1;
4302 	b = *addr2;
4303 
4304 	if (a.sin6_scope_id == 0)
4305 		if (sa6_recoverscope(&a)) {
4306 			/* can't get scope, so can't match */
4307 			return (0);
4308 		}
4309 	if (b.sin6_scope_id == 0)
4310 		if (sa6_recoverscope(&b)) {
4311 			/* can't get scope, so can't match */
4312 			return (0);
4313 		}
4314 	if (a.sin6_scope_id != b.sin6_scope_id)
4315 		return (0);
4316 
4317 	return (1);
4318 }
4319 
4320 /*
4321  * returns a sockaddr_in6 with embedded scope recovered and removed
4322  */
4323 struct sockaddr_in6 *
4324 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4325 {
4326 	/* check and strip embedded scope junk */
4327 	if (addr->sin6_family == AF_INET6) {
4328 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4329 			if (addr->sin6_scope_id == 0) {
4330 				*store = *addr;
4331 				if (!sa6_recoverscope(store)) {
4332 					/* use the recovered scope */
4333 					addr = store;
4334 				}
4335 			} else {
4336 				/* else, return the original "to" addr */
4337 				in6_clearscope(&addr->sin6_addr);
4338 			}
4339 		}
4340 	}
4341 	return (addr);
4342 }
4343 #endif
4344 
4345 /*
4346  * are the two addresses the same?  currently a "scopeless" check returns: 1
4347  * if same, 0 if not
4348  */
4349 int
4350 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4351 {
4352 
4353 	/* must be valid */
4354 	if (sa1 == NULL || sa2 == NULL)
4355 		return (0);
4356 
4357 	/* must be the same family */
4358 	if (sa1->sa_family != sa2->sa_family)
4359 		return (0);
4360 
4361 	switch (sa1->sa_family) {
4362 #ifdef INET6
4363 	case AF_INET6:
4364 		{
4365 			/* IPv6 addresses */
4366 			struct sockaddr_in6 *sin6_1, *sin6_2;
4367 
4368 			sin6_1 = (struct sockaddr_in6 *)sa1;
4369 			sin6_2 = (struct sockaddr_in6 *)sa2;
4370 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4371 			    sin6_2));
4372 		}
4373 #endif
4374 #ifdef INET
4375 	case AF_INET:
4376 		{
4377 			/* IPv4 addresses */
4378 			struct sockaddr_in *sin_1, *sin_2;
4379 
4380 			sin_1 = (struct sockaddr_in *)sa1;
4381 			sin_2 = (struct sockaddr_in *)sa2;
4382 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4383 		}
4384 #endif
4385 	default:
4386 		/* we don't do these... */
4387 		return (0);
4388 	}
4389 }
4390 
4391 void
4392 sctp_print_address(struct sockaddr *sa)
4393 {
4394 #ifdef INET6
4395 	char ip6buf[INET6_ADDRSTRLEN];
4396 #endif
4397 
4398 	switch (sa->sa_family) {
4399 #ifdef INET6
4400 	case AF_INET6:
4401 		{
4402 			struct sockaddr_in6 *sin6;
4403 
4404 			sin6 = (struct sockaddr_in6 *)sa;
4405 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4406 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4407 			    ntohs(sin6->sin6_port),
4408 			    sin6->sin6_scope_id);
4409 			break;
4410 		}
4411 #endif
4412 #ifdef INET
4413 	case AF_INET:
4414 		{
4415 			struct sockaddr_in *sin;
4416 			unsigned char *p;
4417 
4418 			sin = (struct sockaddr_in *)sa;
4419 			p = (unsigned char *)&sin->sin_addr;
4420 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4421 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4422 			break;
4423 		}
4424 #endif
4425 	default:
4426 		SCTP_PRINTF("?\n");
4427 		break;
4428 	}
4429 }
4430 
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 * Socket-buffer accounting is moved with them: each mbuf is
	 * sbfree'd from the old socket's receive buffer and sballoc'd
	 * into the new one.  Used by peeloff/accept.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* lock out concurrent readers of the old socket's receive buffer */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* release sockbuf accounting on the old socket */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* charge the mbufs to the new socket's receive buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4506 
/*
 * Wake up any reader sleeping on the endpoint's socket.  On platforms
 * that require the socket lock (__APPLE__ / SCTP_SO_LOCK_TESTING) the
 * routine handles acquiring it, using the TCB refcount dance to keep
 * the association alive while the TCB lock is dropped.
 */
void
sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(inp);
		if (!so_locked) {
			/*
			 * Lock ordering: hold a ref, drop the TCB lock,
			 * take the socket lock, re-take the TCB lock.
			 */
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			/* re-check: the socket may have gone away meanwhile */
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
4545 
/*
 * Queue a read-control entry (with its mbuf chain) on the endpoint's
 * read queue and charge the data to the socket buffer so select/poll
 * see it.  Zero-length mbufs are pruned from the chain first.  If the
 * socket can no longer be read from, or the chain collapses to
 * nothing, the control is freed instead.  'end' marks the message
 * complete; the lock-held flags tell us which locks the caller owns.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* reader is gone; drop the data instead of queueing it */
		if (!control->on_strm_q) {
			sctp_free_remote_addr(control->whoFrom);
			if (control->data) {
				sctp_m_freem(control->data);
				control->data = NULL;
			}
			sctp_free_a_readq(stcb, control);
		}
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* notifications are not counted as received user messages */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* walk the chain: prune empty mbufs, charge the rest to the sb */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		if (!control->on_strm_q) {
			sctp_free_remote_addr(control->whoFrom);
			sctp_free_a_readq(stcb, control);
		}
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	control->on_read_q = 1;
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* finally, let any sleeping reader know data has arrived */
	if (inp && inp->sctp_socket) {
		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
	}
}
4647 
4648 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4649  *************ALTERNATE ROUTING CODE
4650  */
4651 
4652 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4653  *************ALTERNATE ROUTING CODE
4654  */
4655 
4656 struct mbuf *
4657 sctp_generate_cause(uint16_t code, char *info)
4658 {
4659 	struct mbuf *m;
4660 	struct sctp_gen_error_cause *cause;
4661 	size_t info_len;
4662 	uint16_t len;
4663 
4664 	if ((code == 0) || (info == NULL)) {
4665 		return (NULL);
4666 	}
4667 	info_len = strlen(info);
4668 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4669 		return (NULL);
4670 	}
4671 	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
4672 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4673 	if (m != NULL) {
4674 		SCTP_BUF_LEN(m) = len;
4675 		cause = mtod(m, struct sctp_gen_error_cause *);
4676 		cause->code = htons(code);
4677 		cause->length = htons(len);
4678 		memcpy(cause->info, info, info_len);
4679 	}
4680 	return (m);
4681 }
4682 
4683 struct mbuf *
4684 sctp_generate_no_user_data_cause(uint32_t tsn)
4685 {
4686 	struct mbuf *m;
4687 	struct sctp_error_no_user_data *no_user_data_cause;
4688 	uint16_t len;
4689 
4690 	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
4691 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4692 	if (m != NULL) {
4693 		SCTP_BUF_LEN(m) = len;
4694 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4695 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4696 		no_user_data_cause->cause.length = htons(len);
4697 		no_user_data_cause->tsn = htonl(tsn);
4698 	}
4699 	return (m);
4700 }
4701 
4702 #ifdef SCTP_MBCNT_LOGGING
4703 void
4704 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4705     struct sctp_tmit_chunk *tp1, int chk_cnt)
4706 {
4707 	if (tp1->data == NULL) {
4708 		return;
4709 	}
4710 	asoc->chunks_on_out_queue -= chk_cnt;
4711 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4712 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4713 		    asoc->total_output_queue_size,
4714 		    tp1->book_size,
4715 		    0,
4716 		    tp1->mbcnt);
4717 	}
4718 	if (asoc->total_output_queue_size >= tp1->book_size) {
4719 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4720 	} else {
4721 		asoc->total_output_queue_size = 0;
4722 	}
4723 
4724 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4725 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4726 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4727 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4728 		} else {
4729 			stcb->sctp_socket->so_snd.sb_cc = 0;
4730 
4731 		}
4732 	}
4733 }
4734 
4735 #endif
4736 
4737 int
4738 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4739     uint8_t sent, int so_locked
4740 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4741     SCTP_UNUSED
4742 #endif
4743 )
4744 {
4745 	struct sctp_stream_out *strq;
4746 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4747 	struct sctp_stream_queue_pending *sp;
4748 	uint32_t mid;
4749 	uint16_t sid;
4750 	uint8_t foundeom = 0;
4751 	int ret_sz = 0;
4752 	int notdone;
4753 	int do_wakeup_routine = 0;
4754 
4755 	sid = tp1->rec.data.sid;
4756 	mid = tp1->rec.data.mid;
4757 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4758 		stcb->asoc.abandoned_sent[0]++;
4759 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4760 		stcb->asoc.strmout[sid].abandoned_sent[0]++;
4761 #if defined(SCTP_DETAILED_STR_STATS)
4762 		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4763 #endif
4764 	} else {
4765 		stcb->asoc.abandoned_unsent[0]++;
4766 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4767 		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
4768 #if defined(SCTP_DETAILED_STR_STATS)
4769 		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4770 #endif
4771 	}
4772 	do {
4773 		ret_sz += tp1->book_size;
4774 		if (tp1->data != NULL) {
4775 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4776 				sctp_flight_size_decrease(tp1);
4777 				sctp_total_flight_decrease(stcb, tp1);
4778 			}
4779 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4780 			stcb->asoc.peers_rwnd += tp1->send_size;
4781 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4782 			if (sent) {
4783 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4784 			} else {
4785 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4786 			}
4787 			if (tp1->data) {
4788 				sctp_m_freem(tp1->data);
4789 				tp1->data = NULL;
4790 			}
4791 			do_wakeup_routine = 1;
4792 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4793 				stcb->asoc.sent_queue_cnt_removeable--;
4794 			}
4795 		}
4796 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4797 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4798 		    SCTP_DATA_NOT_FRAG) {
4799 			/* not frag'ed we ae done   */
4800 			notdone = 0;
4801 			foundeom = 1;
4802 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4803 			/* end of frag, we are done */
4804 			notdone = 0;
4805 			foundeom = 1;
4806 		} else {
4807 			/*
4808 			 * Its a begin or middle piece, we must mark all of
4809 			 * it
4810 			 */
4811 			notdone = 1;
4812 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4813 		}
4814 	} while (tp1 && notdone);
4815 	if (foundeom == 0) {
4816 		/*
4817 		 * The multi-part message was scattered across the send and
4818 		 * sent queue.
4819 		 */
4820 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4821 			if ((tp1->rec.data.sid != sid) ||
4822 			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
4823 				break;
4824 			}
4825 			/*
4826 			 * save to chk in case we have some on stream out
4827 			 * queue. If so and we have an un-transmitted one we
4828 			 * don't have to fudge the TSN.
4829 			 */
4830 			chk = tp1;
4831 			ret_sz += tp1->book_size;
4832 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4833 			if (sent) {
4834 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4835 			} else {
4836 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4837 			}
4838 			if (tp1->data) {
4839 				sctp_m_freem(tp1->data);
4840 				tp1->data = NULL;
4841 			}
4842 			/* No flight involved here book the size to 0 */
4843 			tp1->book_size = 0;
4844 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4845 				foundeom = 1;
4846 			}
4847 			do_wakeup_routine = 1;
4848 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4849 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4850 			/*
4851 			 * on to the sent queue so we can wait for it to be
4852 			 * passed by.
4853 			 */
4854 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4855 			    sctp_next);
4856 			stcb->asoc.send_queue_cnt--;
4857 			stcb->asoc.sent_queue_cnt++;
4858 		}
4859 	}
4860 	if (foundeom == 0) {
4861 		/*
4862 		 * Still no eom found. That means there is stuff left on the
4863 		 * stream out queue.. yuck.
4864 		 */
4865 		SCTP_TCB_SEND_LOCK(stcb);
4866 		strq = &stcb->asoc.strmout[sid];
4867 		sp = TAILQ_FIRST(&strq->outqueue);
4868 		if (sp != NULL) {
4869 			sp->discard_rest = 1;
4870 			/*
4871 			 * We may need to put a chunk on the queue that
4872 			 * holds the TSN that would have been sent with the
4873 			 * LAST bit.
4874 			 */
4875 			if (chk == NULL) {
4876 				/* Yep, we have to */
4877 				sctp_alloc_a_chunk(stcb, chk);
4878 				if (chk == NULL) {
4879 					/*
4880 					 * we are hosed. All we can do is
4881 					 * nothing.. which will cause an
4882 					 * abort if the peer is paying
4883 					 * attention.
4884 					 */
4885 					goto oh_well;
4886 				}
4887 				memset(chk, 0, sizeof(*chk));
4888 				chk->rec.data.rcv_flags = 0;
4889 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4890 				chk->asoc = &stcb->asoc;
4891 				if (stcb->asoc.idata_supported == 0) {
4892 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4893 						chk->rec.data.mid = 0;
4894 					} else {
4895 						chk->rec.data.mid = strq->next_mid_ordered;
4896 					}
4897 				} else {
4898 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4899 						chk->rec.data.mid = strq->next_mid_unordered;
4900 					} else {
4901 						chk->rec.data.mid = strq->next_mid_ordered;
4902 					}
4903 				}
4904 				chk->rec.data.sid = sp->sid;
4905 				chk->rec.data.ppid = sp->ppid;
4906 				chk->rec.data.context = sp->context;
4907 				chk->flags = sp->act_flags;
4908 				chk->whoTo = NULL;
4909 				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4910 				strq->chunks_on_queues++;
4911 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4912 				stcb->asoc.sent_queue_cnt++;
4913 				stcb->asoc.pr_sctp_cnt++;
4914 			}
4915 			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4916 			if (sp->sinfo_flags & SCTP_UNORDERED) {
4917 				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
4918 			}
4919 			if (stcb->asoc.idata_supported == 0) {
4920 				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
4921 					strq->next_mid_ordered++;
4922 				}
4923 			} else {
4924 				if (sp->sinfo_flags & SCTP_UNORDERED) {
4925 					strq->next_mid_unordered++;
4926 				} else {
4927 					strq->next_mid_ordered++;
4928 				}
4929 			}
4930 	oh_well:
4931 			if (sp->data) {
4932 				/*
4933 				 * Pull any data to free up the SB and allow
4934 				 * sender to "add more" while we will throw
4935 				 * away :-)
4936 				 */
4937 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4938 				ret_sz += sp->length;
4939 				do_wakeup_routine = 1;
4940 				sp->some_taken = 1;
4941 				sctp_m_freem(sp->data);
4942 				sp->data = NULL;
4943 				sp->tail_mbuf = NULL;
4944 				sp->length = 0;
4945 			}
4946 		}
4947 		SCTP_TCB_SEND_UNLOCK(stcb);
4948 	}
4949 	if (do_wakeup_routine) {
4950 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4951 		struct socket *so;
4952 
4953 		so = SCTP_INP_SO(stcb->sctp_ep);
4954 		if (!so_locked) {
4955 			atomic_add_int(&stcb->asoc.refcnt, 1);
4956 			SCTP_TCB_UNLOCK(stcb);
4957 			SCTP_SOCKET_LOCK(so, 1);
4958 			SCTP_TCB_LOCK(stcb);
4959 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4960 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4961 				/* assoc was freed while we were unlocked */
4962 				SCTP_SOCKET_UNLOCK(so, 1);
4963 				return (ret_sz);
4964 			}
4965 		}
4966 #endif
4967 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4968 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4969 		if (!so_locked) {
4970 			SCTP_SOCKET_UNLOCK(so, 1);
4971 		}
4972 #endif
4973 	}
4974 	return (ret_sz);
4975 }
4976 
4977 /*
4978  * checks to see if the given address, sa, is one that is currently known by
4979  * the kernel note: can't distinguish the same address on multiple interfaces
4980  * and doesn't handle multiple addresses with different zone/scope id's note:
4981  * ifa_ifwithaddr() compares the entire sockaddr struct
4982  */
4983 struct sctp_ifa *
4984 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4985     int holds_lock)
4986 {
4987 	struct sctp_laddr *laddr;
4988 
4989 	if (holds_lock == 0) {
4990 		SCTP_INP_RLOCK(inp);
4991 	}
4992 
4993 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4994 		if (laddr->ifa == NULL)
4995 			continue;
4996 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4997 			continue;
4998 #ifdef INET
4999 		if (addr->sa_family == AF_INET) {
5000 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5001 			    laddr->ifa->address.sin.sin_addr.s_addr) {
5002 				/* found him. */
5003 				if (holds_lock == 0) {
5004 					SCTP_INP_RUNLOCK(inp);
5005 				}
5006 				return (laddr->ifa);
5007 				break;
5008 			}
5009 		}
5010 #endif
5011 #ifdef INET6
5012 		if (addr->sa_family == AF_INET6) {
5013 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5014 			    &laddr->ifa->address.sin6)) {
5015 				/* found him. */
5016 				if (holds_lock == 0) {
5017 					SCTP_INP_RUNLOCK(inp);
5018 				}
5019 				return (laddr->ifa);
5020 				break;
5021 			}
5022 		}
5023 #endif
5024 	}
5025 	if (holds_lock == 0) {
5026 		SCTP_INP_RUNLOCK(inp);
5027 	}
5028 	return (NULL);
5029 }
5030 
/*
 * Fold a sockaddr's network address down to a 32-bit hash value for
 * the per-VRF address hash table.  Unsupported address families hash
 * to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
#ifdef INET
	if (addr->sa_family == AF_INET) {
		struct sockaddr_in *v4;
		uint32_t a;

		/* Mix the high and low halves of the IPv4 address. */
		v4 = (struct sockaddr_in *)addr;
		a = v4->sin_addr.s_addr;
		return (a ^ (a >> 16));
	}
#endif
#ifdef INET6
	if (addr->sa_family == AF_INET6) {
		struct sockaddr_in6 *v6;
		uint32_t sum;

		/* Sum the four 32-bit words, then fold in the top half. */
		v6 = (struct sockaddr_in6 *)addr;
		sum = (v6->sin6_addr.s6_addr32[0] +
		    v6->sin6_addr.s6_addr32[1] +
		    v6->sin6_addr.s6_addr32[2] +
		    v6->sin6_addr.s6_addr32[3]);
		return (sum ^ (sum >> 16));
	}
#endif
	return (0);
}
5064 
5065 struct sctp_ifa *
5066 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5067 {
5068 	struct sctp_ifa *sctp_ifap;
5069 	struct sctp_vrf *vrf;
5070 	struct sctp_ifalist *hash_head;
5071 	uint32_t hash_of_addr;
5072 
5073 	if (holds_lock == 0)
5074 		SCTP_IPI_ADDR_RLOCK();
5075 
5076 	vrf = sctp_find_vrf(vrf_id);
5077 	if (vrf == NULL) {
5078 		if (holds_lock == 0)
5079 			SCTP_IPI_ADDR_RUNLOCK();
5080 		return (NULL);
5081 	}
5082 
5083 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5084 
5085 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5086 	if (hash_head == NULL) {
5087 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5088 		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5089 		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5090 		sctp_print_address(addr);
5091 		SCTP_PRINTF("No such bucket for address\n");
5092 		if (holds_lock == 0)
5093 			SCTP_IPI_ADDR_RUNLOCK();
5094 
5095 		return (NULL);
5096 	}
5097 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5098 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5099 			continue;
5100 #ifdef INET
5101 		if (addr->sa_family == AF_INET) {
5102 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5103 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5104 				/* found him. */
5105 				if (holds_lock == 0)
5106 					SCTP_IPI_ADDR_RUNLOCK();
5107 				return (sctp_ifap);
5108 				break;
5109 			}
5110 		}
5111 #endif
5112 #ifdef INET6
5113 		if (addr->sa_family == AF_INET6) {
5114 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5115 			    &sctp_ifap->address.sin6)) {
5116 				/* found him. */
5117 				if (holds_lock == 0)
5118 					SCTP_IPI_ADDR_RUNLOCK();
5119 				return (sctp_ifap);
5120 				break;
5121 			}
5122 		}
5123 #endif
5124 	}
5125 	if (holds_lock == 0)
5126 		SCTP_IPI_ADDR_RUNLOCK();
5127 	return (NULL);
5128 }
5129 
/*
 * Called after the application has consumed data from the receive
 * socket buffer.  Folds *freed_so_far into the association's running
 * count of bytes freed since the last report and, if the receive
 * window has re-opened by at least rwnd_req bytes, sends a
 * window-update SACK (plus any pending output) to the peer.
 * hold_rlock indicates the caller holds the inp read-queue lock; it is
 * dropped around the SACK/output path and re-taken before returning.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Hold a reference so the association cannot be freed under us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		/* Socket is going away; nothing to report. */
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold this read's byte count into the running total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* Window grew enough to be worth telling the peer. */
		if (hold_rlock) {
			/* Drop the read lock before taking the TCB lock. */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-take the read lock if we dropped it above. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}

	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	/* Release the association reference taken on entry. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5212 
5213 int
5214 sctp_sorecvmsg(struct socket *so,
5215     struct uio *uio,
5216     struct mbuf **mp,
5217     struct sockaddr *from,
5218     int fromlen,
5219     int *msg_flags,
5220     struct sctp_sndrcvinfo *sinfo,
5221     int filling_sinfo)
5222 {
5223 	/*
5224 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5225 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5226 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5227 	 * On the way out we may send out any combination of:
5228 	 * MSG_NOTIFICATION MSG_EOR
5229 	 *
5230 	 */
5231 	struct sctp_inpcb *inp = NULL;
5232 	ssize_t my_len = 0;
5233 	ssize_t cp_len = 0;
5234 	int error = 0;
5235 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5236 	struct mbuf *m = NULL;
5237 	struct sctp_tcb *stcb = NULL;
5238 	int wakeup_read_socket = 0;
5239 	int freecnt_applied = 0;
5240 	int out_flags = 0, in_flags = 0;
5241 	int block_allowed = 1;
5242 	uint32_t freed_so_far = 0;
5243 	ssize_t copied_so_far = 0;
5244 	int in_eeor_mode = 0;
5245 	int no_rcv_needed = 0;
5246 	uint32_t rwnd_req = 0;
5247 	int hold_sblock = 0;
5248 	int hold_rlock = 0;
5249 	ssize_t slen = 0;
5250 	uint32_t held_length = 0;
5251 	int sockbuf_lock = 0;
5252 
5253 	if (uio == NULL) {
5254 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5255 		return (EINVAL);
5256 	}
5257 
5258 	if (msg_flags) {
5259 		in_flags = *msg_flags;
5260 		if (in_flags & MSG_PEEK)
5261 			SCTP_STAT_INCR(sctps_read_peeks);
5262 	} else {
5263 		in_flags = 0;
5264 	}
5265 	slen = uio->uio_resid;
5266 
5267 	/* Pull in and set up our int flags */
5268 	if (in_flags & MSG_OOB) {
5269 		/* Out of band's NOT supported */
5270 		return (EOPNOTSUPP);
5271 	}
5272 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5273 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5274 		return (EINVAL);
5275 	}
5276 	if ((in_flags & (MSG_DONTWAIT
5277 	    | MSG_NBIO
5278 	    )) ||
5279 	    SCTP_SO_IS_NBIO(so)) {
5280 		block_allowed = 0;
5281 	}
5282 	/* setup the endpoint */
5283 	inp = (struct sctp_inpcb *)so->so_pcb;
5284 	if (inp == NULL) {
5285 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5286 		return (EFAULT);
5287 	}
5288 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5289 	/* Must be at least a MTU's worth */
5290 	if (rwnd_req < SCTP_MIN_RWND)
5291 		rwnd_req = SCTP_MIN_RWND;
5292 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5293 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5294 		sctp_misc_ints(SCTP_SORECV_ENTER,
5295 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5296 	}
5297 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5298 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5299 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5300 	}
5301 
5302 
5303 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5304 	if (error) {
5305 		goto release_unlocked;
5306 	}
5307 	sockbuf_lock = 1;
5308 restart:
5309 
5310 
5311 restart_nosblocks:
5312 	if (hold_sblock == 0) {
5313 		SOCKBUF_LOCK(&so->so_rcv);
5314 		hold_sblock = 1;
5315 	}
5316 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5317 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5318 		goto out;
5319 	}
5320 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5321 		if (so->so_error) {
5322 			error = so->so_error;
5323 			if ((in_flags & MSG_PEEK) == 0)
5324 				so->so_error = 0;
5325 			goto out;
5326 		} else {
5327 			if (so->so_rcv.sb_cc == 0) {
5328 				/* indicate EOF */
5329 				error = 0;
5330 				goto out;
5331 			}
5332 		}
5333 	}
5334 	if (so->so_rcv.sb_cc <= held_length) {
5335 		if (so->so_error) {
5336 			error = so->so_error;
5337 			if ((in_flags & MSG_PEEK) == 0) {
5338 				so->so_error = 0;
5339 			}
5340 			goto out;
5341 		}
5342 		if ((so->so_rcv.sb_cc == 0) &&
5343 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5344 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5345 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5346 				/*
5347 				 * For active open side clear flags for
5348 				 * re-use passive open is blocked by
5349 				 * connect.
5350 				 */
5351 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5352 					/*
5353 					 * You were aborted, passive side
5354 					 * always hits here
5355 					 */
5356 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5357 					error = ECONNRESET;
5358 				}
5359 				so->so_state &= ~(SS_ISCONNECTING |
5360 				    SS_ISDISCONNECTING |
5361 				    SS_ISCONFIRMING |
5362 				    SS_ISCONNECTED);
5363 				if (error == 0) {
5364 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5365 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5366 						error = ENOTCONN;
5367 					}
5368 				}
5369 				goto out;
5370 			}
5371 		}
5372 		if (block_allowed) {
5373 			error = sbwait(&so->so_rcv);
5374 			if (error) {
5375 				goto out;
5376 			}
5377 			held_length = 0;
5378 			goto restart_nosblocks;
5379 		} else {
5380 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5381 			error = EWOULDBLOCK;
5382 			goto out;
5383 		}
5384 	}
5385 	if (hold_sblock == 1) {
5386 		SOCKBUF_UNLOCK(&so->so_rcv);
5387 		hold_sblock = 0;
5388 	}
5389 	/* we possibly have data we can read */
5390 	/* sa_ignore FREED_MEMORY */
5391 	control = TAILQ_FIRST(&inp->read_queue);
5392 	if (control == NULL) {
5393 		/*
5394 		 * This could be happening since the appender did the
5395 		 * increment but as not yet did the tailq insert onto the
5396 		 * read_queue
5397 		 */
5398 		if (hold_rlock == 0) {
5399 			SCTP_INP_READ_LOCK(inp);
5400 		}
5401 		control = TAILQ_FIRST(&inp->read_queue);
5402 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5403 #ifdef INVARIANTS
5404 			panic("Huh, its non zero and nothing on control?");
5405 #endif
5406 			so->so_rcv.sb_cc = 0;
5407 		}
5408 		SCTP_INP_READ_UNLOCK(inp);
5409 		hold_rlock = 0;
5410 		goto restart;
5411 	}
5412 
5413 	if ((control->length == 0) &&
5414 	    (control->do_not_ref_stcb)) {
5415 		/*
5416 		 * Clean up code for freeing assoc that left behind a
5417 		 * pdapi.. maybe a peer in EEOR that just closed after
5418 		 * sending and never indicated a EOR.
5419 		 */
5420 		if (hold_rlock == 0) {
5421 			hold_rlock = 1;
5422 			SCTP_INP_READ_LOCK(inp);
5423 		}
5424 		control->held_length = 0;
5425 		if (control->data) {
5426 			/* Hmm there is data here .. fix */
5427 			struct mbuf *m_tmp;
5428 			int cnt = 0;
5429 
5430 			m_tmp = control->data;
5431 			while (m_tmp) {
5432 				cnt += SCTP_BUF_LEN(m_tmp);
5433 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5434 					control->tail_mbuf = m_tmp;
5435 					control->end_added = 1;
5436 				}
5437 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5438 			}
5439 			control->length = cnt;
5440 		} else {
5441 			/* remove it */
5442 			TAILQ_REMOVE(&inp->read_queue, control, next);
			/* Add back any hidden data */
5444 			sctp_free_remote_addr(control->whoFrom);
5445 			sctp_free_a_readq(stcb, control);
5446 		}
5447 		if (hold_rlock) {
5448 			hold_rlock = 0;
5449 			SCTP_INP_READ_UNLOCK(inp);
5450 		}
5451 		goto restart;
5452 	}
5453 	if ((control->length == 0) &&
5454 	    (control->end_added == 1)) {
5455 		/*
5456 		 * Do we also need to check for (control->pdapi_aborted ==
5457 		 * 1)?
5458 		 */
5459 		if (hold_rlock == 0) {
5460 			hold_rlock = 1;
5461 			SCTP_INP_READ_LOCK(inp);
5462 		}
5463 		TAILQ_REMOVE(&inp->read_queue, control, next);
5464 		if (control->data) {
5465 #ifdef INVARIANTS
5466 			panic("control->data not null but control->length == 0");
5467 #else
5468 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5469 			sctp_m_freem(control->data);
5470 			control->data = NULL;
5471 #endif
5472 		}
5473 		if (control->aux_data) {
5474 			sctp_m_free(control->aux_data);
5475 			control->aux_data = NULL;
5476 		}
5477 #ifdef INVARIANTS
5478 		if (control->on_strm_q) {
5479 			panic("About to free ctl:%p so:%p and its in %d",
5480 			    control, so, control->on_strm_q);
5481 		}
5482 #endif
5483 		sctp_free_remote_addr(control->whoFrom);
5484 		sctp_free_a_readq(stcb, control);
5485 		if (hold_rlock) {
5486 			hold_rlock = 0;
5487 			SCTP_INP_READ_UNLOCK(inp);
5488 		}
5489 		goto restart;
5490 	}
5491 	if (control->length == 0) {
5492 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5493 		    (filling_sinfo)) {
5494 			/* find a more suitable one then this */
5495 			ctl = TAILQ_NEXT(control, next);
5496 			while (ctl) {
5497 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5498 				    (ctl->some_taken ||
5499 				    (ctl->spec_flags & M_NOTIFICATION) ||
5500 				    ((ctl->do_not_ref_stcb == 0) &&
5501 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5502 				    ) {
5503 					/*-
5504 					 * If we have a different TCB next, and there is data
5505 					 * present. If we have already taken some (pdapi), OR we can
5506 					 * ref the tcb and no delivery as started on this stream, we
5507 					 * take it. Note we allow a notification on a different
5508 					 * assoc to be delivered..
5509 					 */
5510 					control = ctl;
5511 					goto found_one;
5512 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5513 					    (ctl->length) &&
5514 					    ((ctl->some_taken) ||
5515 					    ((ctl->do_not_ref_stcb == 0) &&
5516 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5517 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5518 					/*-
5519 					 * If we have the same tcb, and there is data present, and we
5520 					 * have the strm interleave feature present. Then if we have
5521 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5522 					 * not started a delivery for this stream, we can take it.
5523 					 * Note we do NOT allow a notificaiton on the same assoc to
5524 					 * be delivered.
5525 					 */
5526 					control = ctl;
5527 					goto found_one;
5528 				}
5529 				ctl = TAILQ_NEXT(ctl, next);
5530 			}
5531 		}
5532 		/*
		 * if we reach here, no suitable replacement is available
5534 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5535 		 * into the our held count, and its time to sleep again.
5536 		 */
5537 		held_length = so->so_rcv.sb_cc;
5538 		control->held_length = so->so_rcv.sb_cc;
5539 		goto restart;
5540 	}
5541 	/* Clear the held length since there is something to read */
5542 	control->held_length = 0;
5543 found_one:
5544 	/*
5545 	 * If we reach here, control has a some data for us to read off.
5546 	 * Note that stcb COULD be NULL.
5547 	 */
5548 	if (hold_rlock == 0) {
5549 		hold_rlock = 1;
5550 		SCTP_INP_READ_LOCK(inp);
5551 	}
5552 	control->some_taken++;
5553 	stcb = control->stcb;
5554 	if (stcb) {
5555 		if ((control->do_not_ref_stcb == 0) &&
5556 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5557 			if (freecnt_applied == 0)
5558 				stcb = NULL;
5559 		} else if (control->do_not_ref_stcb == 0) {
5560 			/* you can't free it on me please */
5561 			/*
5562 			 * The lock on the socket buffer protects us so the
5563 			 * free code will stop. But since we used the
5564 			 * socketbuf lock and the sender uses the tcb_lock
5565 			 * to increment, we need to use the atomic add to
5566 			 * the refcnt
5567 			 */
5568 			if (freecnt_applied) {
5569 #ifdef INVARIANTS
5570 				panic("refcnt already incremented");
5571 #else
5572 				SCTP_PRINTF("refcnt already incremented?\n");
5573 #endif
5574 			} else {
5575 				atomic_add_int(&stcb->asoc.refcnt, 1);
5576 				freecnt_applied = 1;
5577 			}
5578 			/*
5579 			 * Setup to remember how much we have not yet told
5580 			 * the peer our rwnd has opened up. Note we grab the
5581 			 * value from the tcb from last time. Note too that
5582 			 * sack sending clears this when a sack is sent,
5583 			 * which is fine. Once we hit the rwnd_req, we then
5584 			 * will go to the sctp_user_rcvd() that will not
5585 			 * lock until it KNOWs it MUST send a WUP-SACK.
5586 			 */
5587 			freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast;
5588 			stcb->freed_by_sorcv_sincelast = 0;
5589 		}
5590 	}
5591 	if (stcb &&
5592 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5593 	    control->do_not_ref_stcb == 0) {
5594 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5595 	}
5596 
5597 	/* First lets get off the sinfo and sockaddr info */
5598 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5599 		sinfo->sinfo_stream = control->sinfo_stream;
5600 		sinfo->sinfo_ssn = (uint16_t)control->mid;
5601 		sinfo->sinfo_flags = control->sinfo_flags;
5602 		sinfo->sinfo_ppid = control->sinfo_ppid;
5603 		sinfo->sinfo_context = control->sinfo_context;
5604 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5605 		sinfo->sinfo_tsn = control->sinfo_tsn;
5606 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5607 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5608 		nxt = TAILQ_NEXT(control, next);
5609 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5610 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5611 			struct sctp_extrcvinfo *s_extra;
5612 
5613 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5614 			if ((nxt) &&
5615 			    (nxt->length)) {
5616 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5617 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5618 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5619 				}
5620 				if (nxt->spec_flags & M_NOTIFICATION) {
5621 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5622 				}
5623 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5624 				s_extra->serinfo_next_length = nxt->length;
5625 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5626 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5627 				if (nxt->tail_mbuf != NULL) {
5628 					if (nxt->end_added) {
5629 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5630 					}
5631 				}
5632 			} else {
5633 				/*
5634 				 * we explicitly 0 this, since the memcpy
5635 				 * got some other things beyond the older
5636 				 * sinfo_ that is on the control's structure
5637 				 * :-D
5638 				 */
5639 				nxt = NULL;
5640 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5641 				s_extra->serinfo_next_aid = 0;
5642 				s_extra->serinfo_next_length = 0;
5643 				s_extra->serinfo_next_ppid = 0;
5644 				s_extra->serinfo_next_stream = 0;
5645 			}
5646 		}
5647 		/*
5648 		 * update off the real current cum-ack, if we have an stcb.
5649 		 */
5650 		if ((control->do_not_ref_stcb == 0) && stcb)
5651 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5652 		/*
5653 		 * mask off the high bits, we keep the actual chunk bits in
5654 		 * there.
5655 		 */
5656 		sinfo->sinfo_flags &= 0x00ff;
5657 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5658 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5659 		}
5660 	}
5661 #ifdef SCTP_ASOCLOG_OF_TSNS
5662 	{
5663 		int index, newindex;
5664 		struct sctp_pcbtsn_rlog *entry;
5665 
5666 		do {
5667 			index = inp->readlog_index;
5668 			newindex = index + 1;
5669 			if (newindex >= SCTP_READ_LOG_SIZE) {
5670 				newindex = 0;
5671 			}
5672 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5673 		entry = &inp->readlog[index];
5674 		entry->vtag = control->sinfo_assoc_id;
5675 		entry->strm = control->sinfo_stream;
5676 		entry->seq = (uint16_t)control->mid;
5677 		entry->sz = control->length;
5678 		entry->flgs = control->sinfo_flags;
5679 	}
5680 #endif
5681 	if ((fromlen > 0) && (from != NULL)) {
5682 		union sctp_sockstore store;
5683 		size_t len;
5684 
5685 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5686 #ifdef INET6
5687 		case AF_INET6:
5688 			len = sizeof(struct sockaddr_in6);
5689 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5690 			store.sin6.sin6_port = control->port_from;
5691 			break;
5692 #endif
5693 #ifdef INET
5694 		case AF_INET:
5695 #ifdef INET6
5696 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5697 				len = sizeof(struct sockaddr_in6);
5698 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5699 				    &store.sin6);
5700 				store.sin6.sin6_port = control->port_from;
5701 			} else {
5702 				len = sizeof(struct sockaddr_in);
5703 				store.sin = control->whoFrom->ro._l_addr.sin;
5704 				store.sin.sin_port = control->port_from;
5705 			}
5706 #else
5707 			len = sizeof(struct sockaddr_in);
5708 			store.sin = control->whoFrom->ro._l_addr.sin;
5709 			store.sin.sin_port = control->port_from;
5710 #endif
5711 			break;
5712 #endif
5713 		default:
5714 			len = 0;
5715 			break;
5716 		}
5717 		memcpy(from, &store, min((size_t)fromlen, len));
5718 #ifdef INET6
5719 		{
5720 			struct sockaddr_in6 lsa6, *from6;
5721 
5722 			from6 = (struct sockaddr_in6 *)from;
5723 			sctp_recover_scope_mac(from6, (&lsa6));
5724 		}
5725 #endif
5726 	}
5727 	if (hold_rlock) {
5728 		SCTP_INP_READ_UNLOCK(inp);
5729 		hold_rlock = 0;
5730 	}
5731 	if (hold_sblock) {
5732 		SOCKBUF_UNLOCK(&so->so_rcv);
5733 		hold_sblock = 0;
5734 	}
5735 	/* now copy out what data we can */
5736 	if (mp == NULL) {
5737 		/* copy out each mbuf in the chain up to length */
5738 get_more_data:
5739 		m = control->data;
5740 		while (m) {
5741 			/* Move out all we can */
5742 			cp_len = uio->uio_resid;
5743 			my_len = SCTP_BUF_LEN(m);
5744 			if (cp_len > my_len) {
5745 				/* not enough in this buf */
5746 				cp_len = my_len;
5747 			}
5748 			if (hold_rlock) {
5749 				SCTP_INP_READ_UNLOCK(inp);
5750 				hold_rlock = 0;
5751 			}
5752 			if (cp_len > 0)
5753 				error = uiomove(mtod(m, char *), (int)cp_len, uio);
5754 			/* re-read */
5755 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5756 				goto release;
5757 			}
5758 
5759 			if ((control->do_not_ref_stcb == 0) && stcb &&
5760 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5761 				no_rcv_needed = 1;
5762 			}
5763 			if (error) {
5764 				/* error we are out of here */
5765 				goto release;
5766 			}
5767 			SCTP_INP_READ_LOCK(inp);
5768 			hold_rlock = 1;
5769 			if (cp_len == SCTP_BUF_LEN(m)) {
5770 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5771 				    (control->end_added)) {
5772 					out_flags |= MSG_EOR;
5773 					if ((control->do_not_ref_stcb == 0) &&
5774 					    (control->stcb != NULL) &&
5775 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5776 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5777 				}
5778 				if (control->spec_flags & M_NOTIFICATION) {
5779 					out_flags |= MSG_NOTIFICATION;
5780 				}
5781 				/* we ate up the mbuf */
5782 				if (in_flags & MSG_PEEK) {
5783 					/* just looking */
5784 					m = SCTP_BUF_NEXT(m);
5785 					copied_so_far += cp_len;
5786 				} else {
5787 					/* dispose of the mbuf */
5788 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5789 						sctp_sblog(&so->so_rcv,
5790 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5791 					}
5792 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5793 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5794 						sctp_sblog(&so->so_rcv,
5795 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5796 					}
5797 					copied_so_far += cp_len;
5798 					freed_so_far += (uint32_t)cp_len;
5799 					freed_so_far += MSIZE;
5800 					atomic_subtract_int(&control->length, cp_len);
5801 					control->data = sctp_m_free(m);
5802 					m = control->data;
5803 					/*
5804 					 * been through it all, must hold sb
5805 					 * lock ok to null tail
5806 					 */
5807 					if (control->data == NULL) {
5808 #ifdef INVARIANTS
5809 						if ((control->end_added == 0) ||
5810 						    (TAILQ_NEXT(control, next) == NULL)) {
5811 							/*
5812 							 * If the end is not
5813 							 * added, OR the
5814 							 * next is NOT null
5815 							 * we MUST have the
5816 							 * lock.
5817 							 */
5818 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5819 								panic("Hmm we don't own the lock?");
5820 							}
5821 						}
5822 #endif
5823 						control->tail_mbuf = NULL;
5824 #ifdef INVARIANTS
5825 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5826 							panic("end_added, nothing left and no MSG_EOR");
5827 						}
5828 #endif
5829 					}
5830 				}
5831 			} else {
5832 				/* Do we need to trim the mbuf? */
5833 				if (control->spec_flags & M_NOTIFICATION) {
5834 					out_flags |= MSG_NOTIFICATION;
5835 				}
5836 				if ((in_flags & MSG_PEEK) == 0) {
5837 					SCTP_BUF_RESV_UF(m, cp_len);
5838 					SCTP_BUF_LEN(m) -= (int)cp_len;
5839 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5840 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len);
5841 					}
5842 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5843 					if ((control->do_not_ref_stcb == 0) &&
5844 					    stcb) {
5845 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5846 					}
5847 					copied_so_far += cp_len;
5848 					freed_so_far += (uint32_t)cp_len;
5849 					freed_so_far += MSIZE;
5850 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5851 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5852 						    SCTP_LOG_SBRESULT, 0);
5853 					}
5854 					atomic_subtract_int(&control->length, cp_len);
5855 				} else {
5856 					copied_so_far += cp_len;
5857 				}
5858 			}
5859 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5860 				break;
5861 			}
5862 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5863 			    (control->do_not_ref_stcb == 0) &&
5864 			    (freed_so_far >= rwnd_req)) {
5865 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5866 			}
5867 		}		/* end while(m) */
5868 		/*
5869 		 * At this point we have looked at it all and we either have
5870 		 * a MSG_EOR/or read all the user wants... <OR>
5871 		 * control->length == 0.
5872 		 */
5873 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5874 			/* we are done with this control */
5875 			if (control->length == 0) {
5876 				if (control->data) {
5877 #ifdef INVARIANTS
5878 					panic("control->data not null at read eor?");
5879 #else
5880 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5881 					sctp_m_freem(control->data);
5882 					control->data = NULL;
5883 #endif
5884 				}
5885 		done_with_control:
5886 				if (hold_rlock == 0) {
5887 					SCTP_INP_READ_LOCK(inp);
5888 					hold_rlock = 1;
5889 				}
5890 				TAILQ_REMOVE(&inp->read_queue, control, next);
5891 				/* Add back any hiddend data */
5892 				if (control->held_length) {
5893 					held_length = 0;
5894 					control->held_length = 0;
5895 					wakeup_read_socket = 1;
5896 				}
5897 				if (control->aux_data) {
5898 					sctp_m_free(control->aux_data);
5899 					control->aux_data = NULL;
5900 				}
5901 				no_rcv_needed = control->do_not_ref_stcb;
5902 				sctp_free_remote_addr(control->whoFrom);
5903 				control->data = NULL;
5904 #ifdef INVARIANTS
5905 				if (control->on_strm_q) {
5906 					panic("About to free ctl:%p so:%p and its in %d",
5907 					    control, so, control->on_strm_q);
5908 				}
5909 #endif
5910 				sctp_free_a_readq(stcb, control);
5911 				control = NULL;
5912 				if ((freed_so_far >= rwnd_req) &&
5913 				    (no_rcv_needed == 0))
5914 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5915 
5916 			} else {
5917 				/*
5918 				 * The user did not read all of this
5919 				 * message, turn off the returned MSG_EOR
5920 				 * since we are leaving more behind on the
5921 				 * control to read.
5922 				 */
5923 #ifdef INVARIANTS
5924 				if (control->end_added &&
5925 				    (control->data == NULL) &&
5926 				    (control->tail_mbuf == NULL)) {
5927 					panic("Gak, control->length is corrupt?");
5928 				}
5929 #endif
5930 				no_rcv_needed = control->do_not_ref_stcb;
5931 				out_flags &= ~MSG_EOR;
5932 			}
5933 		}
5934 		if (out_flags & MSG_EOR) {
5935 			goto release;
5936 		}
5937 		if ((uio->uio_resid == 0) ||
5938 		    ((in_eeor_mode) &&
5939 		    (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) {
5940 			goto release;
5941 		}
5942 		/*
5943 		 * If I hit here the receiver wants more and this message is
5944 		 * NOT done (pd-api). So two questions. Can we block? if not
5945 		 * we are done. Did the user NOT set MSG_WAITALL?
5946 		 */
5947 		if (block_allowed == 0) {
5948 			goto release;
5949 		}
5950 		/*
5951 		 * We need to wait for more data a few things: - We don't
5952 		 * sbunlock() so we don't get someone else reading. - We
5953 		 * must be sure to account for the case where what is added
5954 		 * is NOT to our control when we wakeup.
5955 		 */
5956 
5957 		/*
5958 		 * Do we need to tell the transport a rwnd update might be
5959 		 * needed before we go to sleep?
5960 		 */
5961 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5962 		    ((freed_so_far >= rwnd_req) &&
5963 		    (control->do_not_ref_stcb == 0) &&
5964 		    (no_rcv_needed == 0))) {
5965 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5966 		}
5967 wait_some_more:
5968 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5969 			goto release;
5970 		}
5971 
5972 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5973 			goto release;
5974 
5975 		if (hold_rlock == 1) {
5976 			SCTP_INP_READ_UNLOCK(inp);
5977 			hold_rlock = 0;
5978 		}
5979 		if (hold_sblock == 0) {
5980 			SOCKBUF_LOCK(&so->so_rcv);
5981 			hold_sblock = 1;
5982 		}
5983 		if ((copied_so_far) && (control->length == 0) &&
5984 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5985 			goto release;
5986 		}
5987 		if (so->so_rcv.sb_cc <= control->held_length) {
5988 			error = sbwait(&so->so_rcv);
5989 			if (error) {
5990 				goto release;
5991 			}
5992 			control->held_length = 0;
5993 		}
5994 		if (hold_sblock) {
5995 			SOCKBUF_UNLOCK(&so->so_rcv);
5996 			hold_sblock = 0;
5997 		}
5998 		if (control->length == 0) {
5999 			/* still nothing here */
6000 			if (control->end_added == 1) {
6001 				/* he aborted, or is done i.e.did a shutdown */
6002 				out_flags |= MSG_EOR;
6003 				if (control->pdapi_aborted) {
6004 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6005 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6006 
6007 					out_flags |= MSG_TRUNC;
6008 				} else {
6009 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6010 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6011 				}
6012 				goto done_with_control;
6013 			}
6014 			if (so->so_rcv.sb_cc > held_length) {
6015 				control->held_length = so->so_rcv.sb_cc;
6016 				held_length = 0;
6017 			}
6018 			goto wait_some_more;
6019 		} else if (control->data == NULL) {
6020 			/*
6021 			 * we must re-sync since data is probably being
6022 			 * added
6023 			 */
6024 			SCTP_INP_READ_LOCK(inp);
6025 			if ((control->length > 0) && (control->data == NULL)) {
6026 				/*
6027 				 * big trouble.. we have the lock and its
6028 				 * corrupt?
6029 				 */
6030 #ifdef INVARIANTS
6031 				panic("Impossible data==NULL length !=0");
6032 #endif
6033 				out_flags |= MSG_EOR;
6034 				out_flags |= MSG_TRUNC;
6035 				control->length = 0;
6036 				SCTP_INP_READ_UNLOCK(inp);
6037 				goto done_with_control;
6038 			}
6039 			SCTP_INP_READ_UNLOCK(inp);
6040 			/* We will fall around to get more data */
6041 		}
6042 		goto get_more_data;
6043 	} else {
6044 		/*-
6045 		 * Give caller back the mbuf chain,
6046 		 * store in uio_resid the length
6047 		 */
6048 		wakeup_read_socket = 0;
6049 		if ((control->end_added == 0) ||
6050 		    (TAILQ_NEXT(control, next) == NULL)) {
6051 			/* Need to get rlock */
6052 			if (hold_rlock == 0) {
6053 				SCTP_INP_READ_LOCK(inp);
6054 				hold_rlock = 1;
6055 			}
6056 		}
6057 		if (control->end_added) {
6058 			out_flags |= MSG_EOR;
6059 			if ((control->do_not_ref_stcb == 0) &&
6060 			    (control->stcb != NULL) &&
6061 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6062 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6063 		}
6064 		if (control->spec_flags & M_NOTIFICATION) {
6065 			out_flags |= MSG_NOTIFICATION;
6066 		}
6067 		uio->uio_resid = control->length;
6068 		*mp = control->data;
6069 		m = control->data;
6070 		while (m) {
6071 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6072 				sctp_sblog(&so->so_rcv,
6073 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6074 			}
6075 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6076 			freed_so_far += (uint32_t)SCTP_BUF_LEN(m);
6077 			freed_so_far += MSIZE;
6078 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6079 				sctp_sblog(&so->so_rcv,
6080 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6081 			}
6082 			m = SCTP_BUF_NEXT(m);
6083 		}
6084 		control->data = control->tail_mbuf = NULL;
6085 		control->length = 0;
6086 		if (out_flags & MSG_EOR) {
6087 			/* Done with this control */
6088 			goto done_with_control;
6089 		}
6090 	}
6091 release:
6092 	if (hold_rlock == 1) {
6093 		SCTP_INP_READ_UNLOCK(inp);
6094 		hold_rlock = 0;
6095 	}
6096 	if (hold_sblock == 1) {
6097 		SOCKBUF_UNLOCK(&so->so_rcv);
6098 		hold_sblock = 0;
6099 	}
6100 
6101 	sbunlock(&so->so_rcv);
6102 	sockbuf_lock = 0;
6103 
6104 release_unlocked:
6105 	if (hold_sblock) {
6106 		SOCKBUF_UNLOCK(&so->so_rcv);
6107 		hold_sblock = 0;
6108 	}
6109 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6110 		if ((freed_so_far >= rwnd_req) &&
6111 		    (control && (control->do_not_ref_stcb == 0)) &&
6112 		    (no_rcv_needed == 0))
6113 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6114 	}
6115 out:
6116 	if (msg_flags) {
6117 		*msg_flags = out_flags;
6118 	}
6119 	if (((out_flags & MSG_EOR) == 0) &&
6120 	    ((in_flags & MSG_PEEK) == 0) &&
6121 	    (sinfo) &&
6122 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6123 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6124 		struct sctp_extrcvinfo *s_extra;
6125 
6126 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6127 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6128 	}
6129 	if (hold_rlock == 1) {
6130 		SCTP_INP_READ_UNLOCK(inp);
6131 	}
6132 	if (hold_sblock) {
6133 		SOCKBUF_UNLOCK(&so->so_rcv);
6134 	}
6135 	if (sockbuf_lock) {
6136 		sbunlock(&so->so_rcv);
6137 	}
6138 
6139 	if (freecnt_applied) {
6140 		/*
6141 		 * The lock on the socket buffer protects us so the free
6142 		 * code will stop. But since we used the socketbuf lock and
6143 		 * the sender uses the tcb_lock to increment, we need to use
6144 		 * the atomic add to the refcnt.
6145 		 */
6146 		if (stcb == NULL) {
6147 #ifdef INVARIANTS
6148 			panic("stcb for refcnt has gone NULL?");
6149 			goto stage_left;
6150 #else
6151 			goto stage_left;
6152 #endif
6153 		}
6154 		/* Save the value back for next time */
6155 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6156 		atomic_add_int(&stcb->asoc.refcnt, -1);
6157 	}
6158 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6159 		if (stcb) {
6160 			sctp_misc_ints(SCTP_SORECV_DONE,
6161 			    freed_so_far,
6162 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6163 			    stcb->asoc.my_rwnd,
6164 			    so->so_rcv.sb_cc);
6165 		} else {
6166 			sctp_misc_ints(SCTP_SORECV_DONE,
6167 			    freed_so_far,
6168 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6169 			    0,
6170 			    so->so_rcv.sb_cc);
6171 		}
6172 	}
6173 stage_left:
6174 	if (wakeup_read_socket) {
6175 		sctp_sorwakeup(inp, so);
6176 	}
6177 	return (error);
6178 }
6179 
6180 
6181 #ifdef SCTP_MBUF_LOGGING
/*
 * Free a single mbuf, logging the free first when SCTP mbuf logging
 * is enabled via the sysctl logging level.  Returns the next mbuf in
 * the chain (the return value of m_free()), or NULL at the end.
 */
struct mbuf *
sctp_m_free(struct mbuf *m)
{
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		sctp_log_mb(m, SCTP_MBUF_IFREE);
	}
	return (m_free(m));
}
6190 
6191 void
6192 sctp_m_freem(struct mbuf *mb)
6193 {
6194 	while (mb != NULL)
6195 		mb = sctp_m_free(mb);
6196 }
6197 
6198 #endif
6199 
/*
 * Given a local address, request a peer-set-primary for all
 * associations that hold the address.  The request is queued on the
 * global address work queue and handed to the iterator.
 *
 * Returns 0 on success, EADDRNOTAVAIL if the address is not a known
 * local address, or ENOMEM if the work-queue entry cannot be
 * allocated.
 */
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now increment the count and initialize the wi structure */
	SCTP_INCR_LADDR_COUNT();
	memset(wi, 0, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* the work-queue entry holds its own reference on the ifa */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	SCTP_WQ_ADDR_UNLOCK();
	return (0);
}
6246 
6247 
6248 int
6249 sctp_soreceive(struct socket *so,
6250     struct sockaddr **psa,
6251     struct uio *uio,
6252     struct mbuf **mp0,
6253     struct mbuf **controlp,
6254     int *flagsp)
6255 {
6256 	int error, fromlen;
6257 	uint8_t sockbuf[256];
6258 	struct sockaddr *from;
6259 	struct sctp_extrcvinfo sinfo;
6260 	int filling_sinfo = 1;
6261 	int flags;
6262 	struct sctp_inpcb *inp;
6263 
6264 	inp = (struct sctp_inpcb *)so->so_pcb;
6265 	/* pickup the assoc we are reading from */
6266 	if (inp == NULL) {
6267 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6268 		return (EINVAL);
6269 	}
6270 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6271 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6272 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6273 	    (controlp == NULL)) {
6274 		/* user does not want the sndrcv ctl */
6275 		filling_sinfo = 0;
6276 	}
6277 	if (psa) {
6278 		from = (struct sockaddr *)sockbuf;
6279 		fromlen = sizeof(sockbuf);
6280 		from->sa_len = 0;
6281 	} else {
6282 		from = NULL;
6283 		fromlen = 0;
6284 	}
6285 
6286 	if (filling_sinfo) {
6287 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6288 	}
6289 	if (flagsp != NULL) {
6290 		flags = *flagsp;
6291 	} else {
6292 		flags = 0;
6293 	}
6294 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags,
6295 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6296 	if (flagsp != NULL) {
6297 		*flagsp = flags;
6298 	}
6299 	if (controlp != NULL) {
6300 		/* copy back the sinfo in a CMSG format */
6301 		if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) {
6302 			*controlp = sctp_build_ctl_nchunk(inp,
6303 			    (struct sctp_sndrcvinfo *)&sinfo);
6304 		} else {
6305 			*controlp = NULL;
6306 		}
6307 	}
6308 	if (psa) {
6309 		/* copy back the address info */
6310 		if (from && from->sa_len) {
6311 			*psa = sodupsockaddr(from, M_NOWAIT);
6312 		} else {
6313 			*psa = NULL;
6314 		}
6315 	}
6316 	return (error);
6317 }
6318 
6319 
6320 
6321 
6322 
/*
 * Add the addresses of the packed list <addr> (totaddr entries) to an
 * existing association as confirmed remote addresses.  On any invalid
 * address or allocation failure the association is freed, *error is
 * set (EINVAL or ENOBUFS) and the walk stops.  Returns the number of
 * addresses successfully added.
 *
 * NOTE(review): the list layout (sa_len / bounds) is presumably
 * validated beforehand by sctp_connectx_helper_find() — confirm at
 * the call sites; this function trusts sa_family-based sizing.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;
#ifdef INET
	struct sockaddr_in *sin;
#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;
#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* reject wildcard, broadcast and multicast addresses */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* reject unspecified and multicast addresses */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/* unknown family: skip over nothing (incr unchanged) */
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6403 
/*
 * Validate a packed user-supplied address list for connectx().
 * Counts the IPv4/IPv6 addresses into *num_v4/*num_v6, verifies each
 * entry's sa_len and that the walk stays within <limit> bytes,
 * rejects v4-mapped IPv6 addresses, and returns EALREADY if any
 * address already belongs to an existing association on this
 * endpoint.  Returns 0 when the whole list is acceptable, EINVAL
 * otherwise.
 */
int
sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
    unsigned int totaddr,
    unsigned int *num_v4, unsigned int *num_v6,
    unsigned int limit)
{
	struct sockaddr *sa;
	struct sctp_tcb *stcb;
	unsigned int incr, at, i;

	at = 0;
	sa = addr;
	*num_v6 = *num_v4 = 0;
	/* account and validate addresses */
	if (totaddr == 0) {
		return (EINVAL);
	}
	for (i = 0; i < totaddr; i++) {
		/* at least a generic sockaddr header must fit */
		if (at + sizeof(struct sockaddr) > limit) {
			return (EINVAL);
		}
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = (unsigned int)sizeof(struct sockaddr_in);
			if (sa->sa_len != incr) {
				return (EINVAL);
			}
			(*num_v4) += 1;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			{
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)sa;
				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
					/* Must be non-mapped for connectx */
					return (EINVAL);
				}
				incr = (unsigned int)sizeof(struct sockaddr_in6);
				if (sa->sa_len != incr) {
					return (EINVAL);
				}
				(*num_v6) += 1;
				break;
			}
#endif
		default:
			return (EINVAL);
		}
		/* the full entry must also fit within the buffer */
		if ((at + incr) > limit) {
			return (EINVAL);
		}
		/*
		 * Hold a ref on the endpoint across the lookup; the
		 * lookup returns a locked stcb on a hit.
		 */
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
		if (stcb != NULL) {
			SCTP_TCB_UNLOCK(stcb);
			return (EALREADY);
		} else {
			SCTP_INP_DECR_REF(inp);
		}
		at += incr;
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
	return (0);
}
6472 
6473 /*
6474  * sctp_bindx(ADD) for one address.
6475  * assumes all arguments are valid/checked by caller.
6476  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;
#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;

#endif
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* convert the v4-mapped address to plain AF_INET */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* endpoint not yet bound: this becomes the primary bind */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* check whether another endpoint already owns this address */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6602 
6603 /*
6604  * sctp_bindx(DELETE) for one address.
6605  * assumes all arguments are valid/checked by caller.
6606  */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;
#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;
#endif

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* convert the v4-mapped address to plain AF_INET */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6689 
6690 /*
6691  * returns the valid local address count for an assoc, taking into account
6692  * all scoping rules
6693  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope;
#if defined(INET)
	int ipv4_local_scope, ipv4_addr_legal;
#endif
#if defined (INET6)
	int local_scope, site_scope, ipv6_addr_legal;
#endif
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}

	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				/* skip addresses restricted for this assoc */
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = &sctp_ifa->address.sin;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						/* skip addresses not visible to our jail */
						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin->sin_addr) != 0) {
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = &sctp_ifa->address.sin6;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						/* skip addresses not visible to our jail */
						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin6->sin6_addr) != 0) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link-local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6832 
6833 #if defined(SCTP_LOCAL_TRACE_BUF)
6834 
/*
 * Record one entry in the global SCTP trace ring buffer.  A slot is
 * claimed lock-free: the CAS loop advances the shared index and the
 * pre-advance value identifies the slot this caller owns, so
 * concurrent writers never share a slot.  When the index reaches the
 * end of the ring it is reset to 1 and the wrapping caller writes
 * slot 0.
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/* claim a slot: retry until the CAS on the shared index succeeds */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* on wrap, the claiming caller uses slot 0 */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6860 
6861 #endif
/*
 * Input handler for SCTP packets encapsulated in UDP (RFC 6951).
 * Called from the UDP kernel-tunneling hook; "off" is the offset of
 * the UDP header within "m".  The UDP header is stripped, its source
 * port is remembered, and the remaining SCTP packet is handed to the
 * normal IPv4/IPv6 SCTP input path.  The mbuf chain is consumed in
 * all cases (either by the input routine or by m_freem() here).
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
    const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
{
	struct ip *iph;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port */
	/*
	 * The struct ip cast is only used for address arithmetic here;
	 * the actual IP version is inspected further below.
	 */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;	/* kept in network byte order */
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	/*
	 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP
	 * checksum and it was valid. Since CSUM_DATA_VALID ==
	 * CSUM_SCTP_VALID this would imply that the HW also verified the
	 * SCTP checksum. Therefore, clear the bit.
	 */
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
	    m->m_pkthdr.len,
	    if_name(m->m_pkthdr.rcvif),
	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/* Shrink the IP payload length by the removed UDP header. */
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = mtod(m, struct ip6_hdr *);
		/* Same payload-length adjustment for IPv6. */
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}
6944 
6945 #ifdef INET
/*
 * Handler for ICMP errors elicited by SCTP packets that were sent with
 * UDP encapsulation (RFC 6951).  "vip" points at the inner (quoted) IP
 * header inside the ICMP message.  If the error can be matched to an
 * existing association — addresses, UDP ports, and verification tag
 * all agree — it is forwarded to sctp_notify().
 */
static void
sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
{
	struct ip *outer_ip, *inner_ip;
	struct sctphdr *sh;
	struct icmp *icmp;
	struct udphdr *udp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_init_chunk *ch;
	struct sockaddr_in src, dst;
	uint8_t type, code;

	inner_ip = (struct ip *)vip;
	/*
	 * Recover the ICMP header and the outer IP header by walking
	 * backwards from the quoted inner IP header.
	 */
	icmp = (struct icmp *)((caddr_t)inner_ip -
	    (sizeof(struct icmp) - sizeof(struct ip)));
	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
	/*
	 * Make sure the quoted packet contains at least the inner IP
	 * header, the UDP header, and the first 8 bytes of the SCTP
	 * common header (ports plus verification tag).
	 */
	if (ntohs(outer_ip->ip_len) <
	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
		return;
	}
	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
	sh = (struct sctphdr *)(udp + 1);
	memset(&src, 0, sizeof(struct sockaddr_in));
	src.sin_family = AF_INET;
	src.sin_len = sizeof(struct sockaddr_in);
	src.sin_port = sh->src_port;
	src.sin_addr = inner_ip->ip_src;
	memset(&dst, 0, sizeof(struct sockaddr_in));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(struct sockaddr_in);
	dst.sin_port = sh->dest_port;
	dst.sin_addr = inner_ip->ip_dst;
	/*
	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
	 * holds our local endpoint address. Thus we reverse the dst and the
	 * src in the lookup.
	 */
	inp = NULL;
	net = NULL;
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	    (struct sockaddr *)&src,
	    &inp, &net, 1,
	    SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/*
		 * The unlock calls below indicate stcb is returned locked;
		 * every exit path from this branch must drop that lock.
		 */
		/* Check the UDP port numbers */
		if ((udp->uh_dport != net->port) ||
		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh->v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider packets
			 * reflecting the verification tag.
			 */
			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			if (ntohs(outer_ip->ip_len) >=
			    sizeof(struct ip) +
			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
				/*
				 * In this case we can check if we got an
				 * INIT chunk and if the initiate tag
				 * matches.
				 */
				ch = (struct sctp_init_chunk *)(sh + 1);
				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = icmp->icmp_type;
		code = icmp->icmp_code;
		/*
		 * Map "port unreachable" to "protocol unreachable" —
		 * presumably because the UDP port only exists as an
		 * encapsulation artifact.
		 */
		if ((type == ICMP_UNREACH) &&
		    (code == ICMP_UNREACH_PORT)) {
			code = ICMP_UNREACH_PROTOCOL;
		}
		sctp_notify(inp, stcb, net, type, code,
		    ntohs(inner_ip->ip_len),
		    (uint32_t)ntohs(icmp->icmp_nextmtu));
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
	return;
}
7053 #endif
7054 
7055 #ifdef INET6
/*
 * Handler for ICMPv6 errors elicited by SCTP packets sent with UDP
 * encapsulation (RFC 6951).  "d" points to a struct ip6ctlparam
 * describing the ICMPv6 message and the quoted packet.  If the error
 * matches an existing association (addresses, UDP ports, and
 * verification tag) it is forwarded to sctp6_notify().
 */
static void
sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
{
	struct ip6ctlparam *ip6cp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctphdr sh;
	struct udphdr udp;
	struct sockaddr_in6 src, dst;
	uint8_t type, code;

	ip6cp = (struct ip6ctlparam *)d;
	/*
	 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid.
	 */
	if (ip6cp->ip6c_m == NULL) {
		return;
	}
	/*
	 * Check if we can safely examine the ports and the verification tag
	 * of the SCTP common header.
	 */
	if (ip6cp->ip6c_m->m_pkthdr.len <
	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
		return;
	}
	/* Copy out the UDP header. */
	memset(&udp, 0, sizeof(struct udphdr));
	m_copydata(ip6cp->ip6c_m,
	    ip6cp->ip6c_off,
	    sizeof(struct udphdr),
	    (caddr_t)&udp);
	/* Copy out the port numbers and the verification tag. */
	memset(&sh, 0, sizeof(struct sctphdr));
	m_copydata(ip6cp->ip6c_m,
	    ip6cp->ip6c_off + sizeof(struct udphdr),
	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
	    (caddr_t)&sh);
	memset(&src, 0, sizeof(struct sockaddr_in6));
	src.sin6_family = AF_INET6;
	src.sin6_len = sizeof(struct sockaddr_in6);
	src.sin6_port = sh.src_port;
	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
	/* Embed the scope for link-local addresses; bail if that fails. */
	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
	memset(&dst, 0, sizeof(struct sockaddr_in6));
	dst.sin6_family = AF_INET6;
	dst.sin6_len = sizeof(struct sockaddr_in6);
	dst.sin6_port = sh.dest_port;
	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
	/*
	 * 'dst' is the destination of the failed packet, 'src' our local
	 * endpoint address, so the lookup reverses them.
	 */
	inp = NULL;
	net = NULL;
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	    (struct sockaddr *)&src,
	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/* Check the UDP port numbers */
		if ((udp.uh_dport != net->port) ||
		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh.v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider packets
			 * reflecting the verification tag.
			 */
			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			if (ip6cp->ip6c_m->m_pkthdr.len >=
			    ip6cp->ip6c_off + sizeof(struct udphdr) +
			    sizeof(struct sctphdr) +
			    sizeof(struct sctp_chunkhdr) +
			    offsetof(struct sctp_init, a_rwnd)) {
				/*
				 * In this case we can check if we got an
				 * INIT chunk and if the initiate tag
				 * matches.
				 */
				uint32_t initiate_tag;
				uint8_t chunk_type;

				m_copydata(ip6cp->ip6c_m,
				    ip6cp->ip6c_off +
				    sizeof(struct udphdr) +
				    sizeof(struct sctphdr),
				    sizeof(uint8_t),
				    (caddr_t)&chunk_type);
				m_copydata(ip6cp->ip6c_m,
				    ip6cp->ip6c_off +
				    sizeof(struct udphdr) +
				    sizeof(struct sctphdr) +
				    sizeof(struct sctp_chunkhdr),
				    sizeof(uint32_t),
				    (caddr_t)&initiate_tag);
				if ((chunk_type != SCTP_INITIATION) ||
				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = ip6cp->ip6c_icmp6->icmp6_type;
		code = ip6cp->ip6c_icmp6->icmp6_code;
		/*
		 * Map "port unreachable" to a next-header parameter
		 * problem — presumably because the UDP port exists only
		 * as an encapsulation artifact.
		 */
		if ((type == ICMP6_DST_UNREACH) &&
		    (code == ICMP6_DST_UNREACH_NOPORT)) {
			type = ICMP6_PARAM_PROB;
			code = ICMP6_PARAMPROB_NEXTHEADER;
		}
		sctp6_notify(inp, stcb, net, type, code,
		    ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce inp's ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
}
7194 #endif
7195 
/*
 * Close and forget the UDP tunneling sockets, if any are open.
 * This function assumes the sysctl caller holds
 * sctp_sysctl_info_lock() for writing!
 */
void
sctp_over_udp_stop(void)
{
#ifdef INET
	struct socket *udp4_so;

	udp4_so = SCTP_BASE_INFO(udp4_tun_socket);
	if (udp4_so != NULL) {
		soclose(udp4_so);
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	struct socket *udp6_so;

	udp6_so = SCTP_BASE_INFO(udp6_tun_socket);
	if (udp6_so != NULL) {
		soclose(udp6_so);
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}
7216 
/*
 * Create the kernel UDP sockets used for SCTP-over-UDP encapsulation
 * (RFC 6951), register the tunneling packet/ICMP handlers with UDP,
 * and bind each socket to the configured tunneling port.  Returns 0
 * on success or an errno value; on any failure the partially created
 * sockets are torn down via sctp_over_udp_stop().
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	/* Create the IPv4 tunneling socket. */
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	/* Same sequence for the IPv6 tunneling socket. */
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}
7303 
7304 /*
7305  * sctp_min_mtu ()returns the minimum of all non-zero arguments.
7306  * If all arguments are zero, zero is returned.
7307  */
7308 uint32_t
7309 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
7310 {
7311 	if (mtu1 > 0) {
7312 		if (mtu2 > 0) {
7313 			if (mtu3 > 0) {
7314 				return (min(mtu1, min(mtu2, mtu3)));
7315 			} else {
7316 				return (min(mtu1, mtu2));
7317 			}
7318 		} else {
7319 			if (mtu3 > 0) {
7320 				return (min(mtu1, mtu3));
7321 			} else {
7322 				return (mtu1);
7323 			}
7324 		}
7325 	} else {
7326 		if (mtu2 > 0) {
7327 			if (mtu3 > 0) {
7328 				return (min(mtu2, mtu3));
7329 			} else {
7330 				return (mtu2);
7331 			}
7332 		} else {
7333 			return (mtu3);
7334 		}
7335 	}
7336 }
7337 
7338 void
7339 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
7340 {
7341 	struct in_conninfo inc;
7342 
7343 	memset(&inc, 0, sizeof(struct in_conninfo));
7344 	inc.inc_fibnum = fibnum;
7345 	switch (addr->sa.sa_family) {
7346 #ifdef INET
7347 	case AF_INET:
7348 		inc.inc_faddr = addr->sin.sin_addr;
7349 		break;
7350 #endif
7351 #ifdef INET6
7352 	case AF_INET6:
7353 		inc.inc_flags |= INC_ISIPV6;
7354 		inc.inc6_faddr = addr->sin6.sin6_addr;
7355 		break;
7356 #endif
7357 	default:
7358 		return;
7359 	}
7360 	tcp_hc_updatemtu(&inc, (u_long)mtu);
7361 }
7362 
7363 uint32_t
7364 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
7365 {
7366 	struct in_conninfo inc;
7367 
7368 	memset(&inc, 0, sizeof(struct in_conninfo));
7369 	inc.inc_fibnum = fibnum;
7370 	switch (addr->sa.sa_family) {
7371 #ifdef INET
7372 	case AF_INET:
7373 		inc.inc_faddr = addr->sin.sin_addr;
7374 		break;
7375 #endif
7376 #ifdef INET6
7377 	case AF_INET6:
7378 		inc.inc_flags |= INC_ISIPV6;
7379 		inc.inc6_faddr = addr->sin6.sin6_addr;
7380 		break;
7381 #endif
7382 	default:
7383 		return (0);
7384 	}
7385 	return ((uint32_t)tcp_hc_getmtu(&inc));
7386 }
7387 
/*
 * Replace the base state (the SCTP_STATE_MASK bits) of the association
 * with new_state, preserving all substate flags.  new_state must not
 * contain substate bits (enforced by the KASSERT).  Entering any of
 * the shutdown states clears the SHUTDOWN_PENDING substate.  With
 * KDTRACE_HOOKS, the state-change probe fires for every real
 * transition except the initial EMPTY -> INUSE one.
 */
void
sctp_set_state(struct sctp_tcb *stcb, int new_state)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
	    ("sctp_set_state: Can't set substate (new_state = %x)",
	    new_state));
	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
	}
#if defined(KDTRACE_HOOKS)
	if (((old_state & SCTP_STATE_MASK) != new_state) &&
	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
	    (new_state == SCTP_STATE_INUSE))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}
7412 
/*
 * Set the given substate flag bits on the association without touching
 * the base state.  substate must not contain base-state bits (enforced
 * by the KASSERT).  With KDTRACE_HOOKS, the state-change probe fires
 * when ABOUT_TO_BE_FREED or SHUTDOWN_PENDING is newly set.
 */
void
sctp_add_substate(struct sctp_tcb *stcb, int substate)
{
#if defined(KDTRACE_HOOKS)
	int old_state = stcb->asoc.state;
#endif

	KASSERT((substate & SCTP_STATE_MASK) == 0,
	    ("sctp_add_substate: Can't set state (substate = %x)",
	    substate));
	stcb->asoc.state |= substate;
#if defined(KDTRACE_HOOKS)
	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
	    ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
	    ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
	}
#endif
}
7433