xref: /freebsd/sys/netinet/sctputil.c (revision ee55186dfd98663c567b0dfdccf0c3c31282f37a)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #include <netinet6/sctp6_var.h>
45 #endif
46 #include <netinet/sctp_header.h>
47 #include <netinet/sctp_output.h>
48 #include <netinet/sctp_uio.h>
49 #include <netinet/sctp_timer.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_auth.h>
52 #include <netinet/sctp_asconf.h>
53 #include <netinet/sctp_bsd_addr.h>
54 #include <netinet/sctp_kdtrace.h>
55 #if defined(INET6) || defined(INET)
56 #include <netinet/tcp_var.h>
57 #endif
58 #include <netinet/udp.h>
59 #include <netinet/udp_var.h>
60 #include <sys/proc.h>
61 #ifdef INET6
62 #include <netinet/icmp6.h>
63 #endif
64 
65 
66 #ifndef KTR_SCTP
67 #define KTR_SCTP KTR_SUBSYS
68 #endif
69 
70 extern const struct sctp_cc_functions sctp_cc_functions[];
71 extern const struct sctp_ss_functions sctp_ss_functions[];
72 
/*
 * Log a socket-buffer accounting event (socket-buffer byte count,
 * association byte count, and the delta applied) to the local trace
 * buffer.  Compiled out unless SCTP_LOCAL_TRACE_BUF is defined.
 * stcb may be NULL.
 */
void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Zero first, as the sibling loggers do: the misc.log1..log4 words
	 * read below may cover bytes the x.sb assignments never touch, and
	 * would otherwise leak uninitialized stack data into the trace.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
95 
96 void
97 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
98 {
99 #if defined(SCTP_LOCAL_TRACE_BUF)
100 	struct sctp_cwnd_log sctp_clog;
101 
102 	sctp_clog.x.close.inp = (void *)inp;
103 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
104 	if (stcb) {
105 		sctp_clog.x.close.stcb = (void *)stcb;
106 		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
107 	} else {
108 		sctp_clog.x.close.stcb = 0;
109 		sctp_clog.x.close.state = 0;
110 	}
111 	sctp_clog.x.close.loc = loc;
112 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
113 	    SCTP_LOG_EVENT_CLOSE,
114 	    0,
115 	    sctp_clog.x.misc.log1,
116 	    sctp_clog.x.misc.log2,
117 	    sctp_clog.x.misc.log3,
118 	    sctp_clog.x.misc.log4);
119 #endif
120 }
121 
/*
 * Trace an RTT measurement for the given destination address.
 * Compiled out unless SCTP_LOCAL_TRACE_BUF is defined.
 */
void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log clog;

	memset(&clog, 0, sizeof(clog));
	clog.x.rto.net = (void *)net;
	/* net->rtt scaled down by 1000 -- presumably us -> ms; confirm. */
	clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT, from,
	    clog.x.misc.log1, clog.x.misc.log2,
	    clog.x.misc.log3, clog.x.misc.log4);
#endif
}
140 
141 void
142 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
143 {
144 #if defined(SCTP_LOCAL_TRACE_BUF)
145 	struct sctp_cwnd_log sctp_clog;
146 
147 	sctp_clog.x.strlog.stcb = stcb;
148 	sctp_clog.x.strlog.n_tsn = tsn;
149 	sctp_clog.x.strlog.n_sseq = sseq;
150 	sctp_clog.x.strlog.e_tsn = 0;
151 	sctp_clog.x.strlog.e_sseq = 0;
152 	sctp_clog.x.strlog.strm = stream;
153 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
154 	    SCTP_LOG_EVENT_STRM,
155 	    from,
156 	    sctp_clog.x.misc.log1,
157 	    sctp_clog.x.misc.log2,
158 	    sctp_clog.x.misc.log3,
159 	    sctp_clog.x.misc.log4);
160 #endif
161 }
162 
/*
 * Log a Nagle-algorithm decision together with the association's
 * current output-queue and flight statistics.  stcb must not be NULL.
 */
void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so misc.log1..log4 never expose stack garbage. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
183 
184 void
185 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
186 {
187 #if defined(SCTP_LOCAL_TRACE_BUF)
188 	struct sctp_cwnd_log sctp_clog;
189 
190 	sctp_clog.x.sack.cumack = cumack;
191 	sctp_clog.x.sack.oldcumack = old_cumack;
192 	sctp_clog.x.sack.tsn = tsn;
193 	sctp_clog.x.sack.numGaps = gaps;
194 	sctp_clog.x.sack.numDups = dups;
195 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
196 	    SCTP_LOG_EVENT_SACK,
197 	    from,
198 	    sctp_clog.x.misc.log1,
199 	    sctp_clog.x.misc.log2,
200 	    sctp_clog.x.misc.log3,
201 	    sctp_clog.x.misc.log4);
202 #endif
203 }
204 
205 void
206 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
207 {
208 #if defined(SCTP_LOCAL_TRACE_BUF)
209 	struct sctp_cwnd_log sctp_clog;
210 
211 	memset(&sctp_clog, 0, sizeof(sctp_clog));
212 	sctp_clog.x.map.base = map;
213 	sctp_clog.x.map.cum = cum;
214 	sctp_clog.x.map.high = high;
215 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
216 	    SCTP_LOG_EVENT_MAP,
217 	    from,
218 	    sctp_clog.x.misc.log1,
219 	    sctp_clog.x.misc.log2,
220 	    sctp_clog.x.misc.log3,
221 	    sctp_clog.x.misc.log4);
222 #endif
223 }
224 
225 void
226 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
227 {
228 #if defined(SCTP_LOCAL_TRACE_BUF)
229 	struct sctp_cwnd_log sctp_clog;
230 
231 	memset(&sctp_clog, 0, sizeof(sctp_clog));
232 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
233 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
234 	sctp_clog.x.fr.tsn = tsn;
235 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
236 	    SCTP_LOG_EVENT_FR,
237 	    from,
238 	    sctp_clog.x.misc.log1,
239 	    sctp_clog.x.misc.log2,
240 	    sctp_clog.x.misc.log3,
241 	    sctp_clog.x.misc.log4);
242 #endif
243 }
244 
245 #ifdef SCTP_MBUF_LOGGING
/*
 * Log the salient fields of one mbuf: flags, length, data pointer, and
 * external-storage base/refcount when the mbuf has external storage.
 */
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so misc.log1..log4 never expose stack garbage. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
272 
273 void
274 sctp_log_mbc(struct mbuf *m, int from)
275 {
276 	struct mbuf *mat;
277 
278 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
279 		sctp_log_mb(mat, from);
280 	}
281 }
282 #endif
283 
/*
 * Log a stream-delivery event for the chunk being delivered (control)
 * and, when given, the chunk it was checked against (poschk).
 * Complains and bails if control is NULL.
 */
void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	/* Zero first so misc.log1..log4 never expose stack garbage. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
314 
315 void
316 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
317 {
318 #if defined(SCTP_LOCAL_TRACE_BUF)
319 	struct sctp_cwnd_log sctp_clog;
320 
321 	sctp_clog.x.cwnd.net = net;
322 	if (stcb->asoc.send_queue_cnt > 255)
323 		sctp_clog.x.cwnd.cnt_in_send = 255;
324 	else
325 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
326 	if (stcb->asoc.stream_queue_cnt > 255)
327 		sctp_clog.x.cwnd.cnt_in_str = 255;
328 	else
329 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
330 
331 	if (net) {
332 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
333 		sctp_clog.x.cwnd.inflight = net->flight_size;
334 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
335 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
336 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
337 	}
338 	if (SCTP_CWNDLOG_PRESEND == from) {
339 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
340 	}
341 	sctp_clog.x.cwnd.cwnd_augment = augment;
342 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
343 	    SCTP_LOG_EVENT_CWND,
344 	    from,
345 	    sctp_clog.x.misc.log1,
346 	    sctp_clog.x.misc.log2,
347 	    sctp_clog.x.misc.log3,
348 	    sctp_clog.x.misc.log4);
349 #endif
350 }
351 
/*
 * Record the ownership state of the relevant SCTP endpoint, association,
 * global-info, and socket-buffer locks in the local trace buffer.
 * Both inp and stcb may be NULL; unknown states are logged as
 * SCTP_LOCK_UNKNOWN.  Compiled out unless SCTP_LOCAL_TRACE_BUF is set.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both sample
		 * so_rcv.sb_mtx -- looks intentional (the socket lock
		 * aliasing the receive-buffer lock), but confirm.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
397 
/*
 * Log a max-burst limiting event for the given destination, including
 * queue depths (capped at 255) and the destination's flight size.
 * NOTE(review): net is dereferenced unconditionally here, unlike
 * sctp_log_cwnd() which tolerates net == NULL -- callers must pass a
 * valid net.
 */
void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
426 
427 void
428 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
429 {
430 #if defined(SCTP_LOCAL_TRACE_BUF)
431 	struct sctp_cwnd_log sctp_clog;
432 
433 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
434 	sctp_clog.x.rwnd.send_size = snd_size;
435 	sctp_clog.x.rwnd.overhead = overhead;
436 	sctp_clog.x.rwnd.new_rwnd = 0;
437 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
438 	    SCTP_LOG_EVENT_RWND,
439 	    from,
440 	    sctp_clog.x.misc.log1,
441 	    sctp_clog.x.misc.log2,
442 	    sctp_clog.x.misc.log3,
443 	    sctp_clog.x.misc.log4);
444 #endif
445 }
446 
447 void
448 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
449 {
450 #if defined(SCTP_LOCAL_TRACE_BUF)
451 	struct sctp_cwnd_log sctp_clog;
452 
453 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
454 	sctp_clog.x.rwnd.send_size = flight_size;
455 	sctp_clog.x.rwnd.overhead = overhead;
456 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
457 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
458 	    SCTP_LOG_EVENT_RWND,
459 	    from,
460 	    sctp_clog.x.misc.log1,
461 	    sctp_clog.x.misc.log2,
462 	    sctp_clog.x.misc.log3,
463 	    sctp_clog.x.misc.log4);
464 #endif
465 }
466 
467 #ifdef SCTP_MBCNT_LOGGING
468 static void
469 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
470 {
471 #if defined(SCTP_LOCAL_TRACE_BUF)
472 	struct sctp_cwnd_log sctp_clog;
473 
474 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
475 	sctp_clog.x.mbcnt.size_change = book;
476 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
477 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
478 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
479 	    SCTP_LOG_EVENT_MBCNT,
480 	    from,
481 	    sctp_clog.x.misc.log1,
482 	    sctp_clog.x.misc.log2,
483 	    sctp_clog.x.misc.log3,
484 	    sctp_clog.x.misc.log4);
485 #endif
486 }
487 #endif
488 
/*
 * Log four caller-supplied 32-bit values as a generic "misc" trace
 * event.  Compiled out unless SCTP_LOCAL_TRACE_BUF is defined.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
#endif
}
499 
500 void
501 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
502 {
503 #if defined(SCTP_LOCAL_TRACE_BUF)
504 	struct sctp_cwnd_log sctp_clog;
505 
506 	sctp_clog.x.wake.stcb = (void *)stcb;
507 	sctp_clog.x.wake.wake_cnt = wake_cnt;
508 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
509 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
510 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
511 
512 	if (stcb->asoc.stream_queue_cnt < 0xff)
513 		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
514 	else
515 		sctp_clog.x.wake.stream_qcnt = 0xff;
516 
517 	if (stcb->asoc.chunks_on_out_queue < 0xff)
518 		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
519 	else
520 		sctp_clog.x.wake.chunks_on_oque = 0xff;
521 
522 	sctp_clog.x.wake.sctpflags = 0;
523 	/* set in the defered mode stuff */
524 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
525 		sctp_clog.x.wake.sctpflags |= 1;
526 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
527 		sctp_clog.x.wake.sctpflags |= 2;
528 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
529 		sctp_clog.x.wake.sctpflags |= 4;
530 	/* what about the sb */
531 	if (stcb->sctp_socket) {
532 		struct socket *so = stcb->sctp_socket;
533 
534 		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
535 	} else {
536 		sctp_clog.x.wake.sbflags = 0xff;
537 	}
538 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
539 	    SCTP_LOG_EVENT_WAKE,
540 	    from,
541 	    sctp_clog.x.misc.log1,
542 	    sctp_clog.x.misc.log2,
543 	    sctp_clog.x.misc.log3,
544 	    sctp_clog.x.misc.log4);
545 #endif
546 }
547 
548 void
549 sctp_log_block(uint8_t from, struct sctp_association *asoc, ssize_t sendlen)
550 {
551 #if defined(SCTP_LOCAL_TRACE_BUF)
552 	struct sctp_cwnd_log sctp_clog;
553 
554 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
555 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
556 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
557 	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
558 	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
559 	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
560 	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
561 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
562 	    SCTP_LOG_EVENT_BLOCK,
563 	    from,
564 	    sctp_clog.x.misc.log1,
565 	    sctp_clog.x.misc.log2,
566 	    sctp_clog.x.misc.log3,
567 	    sctp_clog.x.misc.log4);
568 #endif
569 }
570 
/*
 * Socket-option hook for copying out the trace log; intentionally a
 * no-op here since trace records are expected to be retrieved with
 * ktrdump (see comment below).  Always returns 0 (success).
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
577 
578 #ifdef SCTP_AUDITING_ENABLED
/* Ring buffer of (event, detail) byte pairs recorded by the audit code. */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
/* Next write slot in sctp_audit_data; wraps at SCTP_AUDIT_SIZE. */
static int sctp_audit_indx = 0;
581 
582 static
583 void
584 sctp_print_audit_report(void)
585 {
586 	int i;
587 	int cnt;
588 
589 	cnt = 0;
590 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
591 		if ((sctp_audit_data[i][0] == 0xe0) &&
592 		    (sctp_audit_data[i][1] == 0x01)) {
593 			cnt = 0;
594 			SCTP_PRINTF("\n");
595 		} else if (sctp_audit_data[i][0] == 0xf0) {
596 			cnt = 0;
597 			SCTP_PRINTF("\n");
598 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
599 		    (sctp_audit_data[i][1] == 0x01)) {
600 			SCTP_PRINTF("\n");
601 			cnt = 0;
602 		}
603 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
604 		    (uint32_t)sctp_audit_data[i][1]);
605 		cnt++;
606 		if ((cnt % 14) == 0)
607 			SCTP_PRINTF("\n");
608 	}
609 	for (i = 0; i < sctp_audit_indx; i++) {
610 		if ((sctp_audit_data[i][0] == 0xe0) &&
611 		    (sctp_audit_data[i][1] == 0x01)) {
612 			cnt = 0;
613 			SCTP_PRINTF("\n");
614 		} else if (sctp_audit_data[i][0] == 0xf0) {
615 			cnt = 0;
616 			SCTP_PRINTF("\n");
617 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
618 		    (sctp_audit_data[i][1] == 0x01)) {
619 			SCTP_PRINTF("\n");
620 			cnt = 0;
621 		}
622 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
623 		    (uint32_t)sctp_audit_data[i][1]);
624 		cnt++;
625 		if ((cnt % 14) == 0)
626 			SCTP_PRINTF("\n");
627 	}
628 	SCTP_PRINTF("\n");
629 }
630 
/*
 * Consistency audit of an association's retransmission and in-flight
 * accounting.  Records progress markers in the audit ring buffer,
 * recomputes the retransmit count, flight bytes, and booked-chunk count
 * from the sent queue, repairs any mismatching asoc/net counters it
 * finds, and prints the audit report when a discrepancy was detected.
 * Safe to call with inp or stcb NULL (a bail-out marker is logged).
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* Marker 0xAA: audit entered; low byte identifies the caller. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* Marker 0xAF/0x01: bailing out, no endpoint. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* Marker 0xAF/0x02: bailing out, no association. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* Marker 0xA1: retransmit count as the association reports it. */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/*
	 * Recompute retransmit count, flight bytes, and booked chunk
	 * count directly from the sent queue.
	 */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* Retransmit count mismatch: report and repair. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* Flight-size (bytes) mismatch: report and repair. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* Flight-size (chunk count) mismatch: report and repair. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	tot_out = 0;
	/* The per-destination flight sizes must also sum to the total. */
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
760 
761 void
762 sctp_audit_log(uint8_t ev, uint8_t fd)
763 {
764 
765 	sctp_audit_data[sctp_audit_indx][0] = ev;
766 	sctp_audit_data[sctp_audit_indx][1] = fd;
767 	sctp_audit_indx++;
768 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
769 		sctp_audit_indx = 0;
770 	}
771 }
772 
773 #endif
774 
775 /*
776  * sctp_stop_timers_for_shutdown() should be called
777  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
778  * state to make sure that all timers are stopped.
779  */
/*
 * Stop the timers that must not keep running once the association has
 * entered SHUTDOWN_SENT or SHUTDOWN_ACK_SENT (see the block comment
 * above).  Association-wide timers first, then the per-destination
 * PMTU-raise and heartbeat timers.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;

	/* Association-wide timers. */
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_12);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_13);
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_14);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_15);
	/* Per-destination timers. */
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_16);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_17);
	}
}
803 
/*
 * Stop every timer associated with stcb: all association-wide timers
 * (the ASOCKILL timer only when stop_assoc_kill_timer is set, so the
 * kill timer can be left armed while tearing the rest down) and then
 * all per-destination timers.
 */
void
sctp_stop_association_timers(struct sctp_tcb *stcb, bool stop_assoc_kill_timer)
{
	struct sctp_inpcb *inp;
	struct sctp_nets *net;

	inp = stcb->sctp_ep;
	/* Association-wide timers. */
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_18);
	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_19);
	if (stop_assoc_kill_timer) {
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_20);
	}
	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_21);
	sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_22);
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNGUARD, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_23);
	/* Mobility adaptation */
	sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, inp, stcb, NULL,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_24);
	/* Per-destination timers. */
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_25);
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_26);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_27);
		sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_28);
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_29);
		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_30);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_31);
	}
}
845 
846 /*
847  * A list of sizes based on typical mtu's, used only if next hop size not
848  * returned. These values MUST be multiples of 4 and MUST be ordered.
849  */
/*
 * NOTE(review): these resemble the classical link-layer MTU plateaus
 * (cf. the RFC 1191 PMTU plateau table -- e.g. 296 SLIP, 1492
 * 802.3/PPPoE, 1500 Ethernet, 65532 near-maximum IPv4 payload rounded
 * down to a multiple of 4) -- confirm before relying on any specific
 * entry's provenance.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1004,
	1492,
	1500,
	1536,
	2000,
	2048,
	4352,
	4464,
	8168,
	17912,
	32000,
	65532
};
870 
871 /*
872  * Return the largest MTU in sctp_mtu_sizes smaller than val.
873  * If val is smaller than the minimum, just return the largest
874  * multiple of 4 smaller or equal to val.
875  * Ensure that the result is a multiple of 4.
876  */
877 uint32_t
878 sctp_get_prev_mtu(uint32_t val)
879 {
880 	uint32_t i;
881 
882 	val &= 0xfffffffc;
883 	if (val <= sctp_mtu_sizes[0]) {
884 		return (val);
885 	}
886 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
887 		if (val <= sctp_mtu_sizes[i]) {
888 			break;
889 		}
890 	}
891 	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
892 	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
893 	return (sctp_mtu_sizes[i - 1]);
894 }
895 
896 /*
897  * Return the smallest MTU in sctp_mtu_sizes larger than val.
898  * If val is larger than the maximum, just return the largest multiple of 4 smaller
899  * or equal to val.
900  * Ensure that the result is a multiple of 4.
901  */
902 uint32_t
903 sctp_get_next_mtu(uint32_t val)
904 {
905 	/* select another MTU that is just bigger than this one */
906 	uint32_t i;
907 
908 	val &= 0xfffffffc;
909 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
910 		if (val < sctp_mtu_sizes[i]) {
911 			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
912 			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
913 			return (sctp_mtu_sizes[i]);
914 		}
915 	}
916 	return (val);
917 }
918 
/*
 * Refill m->random_store by HMAC-ing the endpoint's random seed with a
 * monotonically increasing counter, and reset the read offset to 0.
 * Deliberately lock-free; see the rationale below.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	m->random_counter++;
}
937 
938 uint32_t
939 sctp_select_initial_TSN(struct sctp_pcb *inp)
940 {
941 	/*
942 	 * A true implementation should use random selection process to get
943 	 * the initial stream sequence number, using RFC1750 as a good
944 	 * guideline
945 	 */
946 	uint32_t x, *xp;
947 	uint8_t *p;
948 	int store_at, new_store;
949 
950 	if (inp->initial_sequence_debug != 0) {
951 		uint32_t ret;
952 
953 		ret = inp->initial_sequence_debug;
954 		inp->initial_sequence_debug++;
955 		return (ret);
956 	}
957 retry:
958 	store_at = inp->store_at;
959 	new_store = store_at + sizeof(uint32_t);
960 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
961 		new_store = 0;
962 	}
963 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
964 		goto retry;
965 	}
966 	if (new_store == 0) {
967 		/* Refill the random store */
968 		sctp_fill_random_store(inp);
969 	}
970 	p = &inp->random_store[store_at];
971 	xp = (uint32_t *)p;
972 	x = *xp;
973 	return (x);
974 }
975 
976 uint32_t
977 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
978 {
979 	uint32_t x;
980 	struct timeval now;
981 
982 	if (check) {
983 		(void)SCTP_GETTIME_TIMEVAL(&now);
984 	}
985 	for (;;) {
986 		x = sctp_select_initial_TSN(&inp->sctp_ep);
987 		if (x == 0) {
988 			/* we never use 0 */
989 			continue;
990 		}
991 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
992 			break;
993 		}
994 	}
995 	return (x);
996 }
997 
998 int32_t
999 sctp_map_assoc_state(int kernel_state)
1000 {
1001 	int32_t user_state;
1002 
1003 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
1004 		user_state = SCTP_CLOSED;
1005 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
1006 		user_state = SCTP_SHUTDOWN_PENDING;
1007 	} else {
1008 		switch (kernel_state & SCTP_STATE_MASK) {
1009 		case SCTP_STATE_EMPTY:
1010 			user_state = SCTP_CLOSED;
1011 			break;
1012 		case SCTP_STATE_INUSE:
1013 			user_state = SCTP_CLOSED;
1014 			break;
1015 		case SCTP_STATE_COOKIE_WAIT:
1016 			user_state = SCTP_COOKIE_WAIT;
1017 			break;
1018 		case SCTP_STATE_COOKIE_ECHOED:
1019 			user_state = SCTP_COOKIE_ECHOED;
1020 			break;
1021 		case SCTP_STATE_OPEN:
1022 			user_state = SCTP_ESTABLISHED;
1023 			break;
1024 		case SCTP_STATE_SHUTDOWN_SENT:
1025 			user_state = SCTP_SHUTDOWN_SENT;
1026 			break;
1027 		case SCTP_STATE_SHUTDOWN_RECEIVED:
1028 			user_state = SCTP_SHUTDOWN_RECEIVED;
1029 			break;
1030 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
1031 			user_state = SCTP_SHUTDOWN_ACK_SENT;
1032 			break;
1033 		default:
1034 			user_state = SCTP_CLOSED;
1035 			break;
1036 		}
1037 	}
1038 	return (user_state);
1039 }
1040 
1041 int
1042 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1043     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
1044 {
1045 	struct sctp_association *asoc;
1046 
1047 	/*
1048 	 * Anything set to zero is taken care of by the allocation routine's
1049 	 * bzero
1050 	 */
1051 
1052 	/*
1053 	 * Up front select what scoping to apply on addresses I tell my peer
1054 	 * Not sure what to do with these right now, we will need to come up
1055 	 * with a way to set them. We may need to pass them through from the
1056 	 * caller in the sctp_aloc_assoc() function.
1057 	 */
1058 	int i;
1059 #if defined(SCTP_DETAILED_STR_STATS)
1060 	int j;
1061 #endif
1062 
1063 	asoc = &stcb->asoc;
1064 	/* init all variables to a known value. */
1065 	SCTP_SET_STATE(stcb, SCTP_STATE_INUSE);
1066 	asoc->max_burst = inp->sctp_ep.max_burst;
1067 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
1068 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
1069 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
1070 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
1071 	asoc->ecn_supported = inp->ecn_supported;
1072 	asoc->prsctp_supported = inp->prsctp_supported;
1073 	asoc->idata_supported = inp->idata_supported;
1074 	asoc->auth_supported = inp->auth_supported;
1075 	asoc->asconf_supported = inp->asconf_supported;
1076 	asoc->reconfig_supported = inp->reconfig_supported;
1077 	asoc->nrsack_supported = inp->nrsack_supported;
1078 	asoc->pktdrop_supported = inp->pktdrop_supported;
1079 	asoc->idata_supported = inp->idata_supported;
1080 	asoc->sctp_cmt_pf = (uint8_t)0;
1081 	asoc->sctp_frag_point = inp->sctp_frag_point;
1082 	asoc->sctp_features = inp->sctp_features;
1083 	asoc->default_dscp = inp->sctp_ep.default_dscp;
1084 	asoc->max_cwnd = inp->max_cwnd;
1085 #ifdef INET6
1086 	if (inp->sctp_ep.default_flowlabel) {
1087 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
1088 	} else {
1089 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
1090 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
1091 			asoc->default_flowlabel &= 0x000fffff;
1092 			asoc->default_flowlabel |= 0x80000000;
1093 		} else {
1094 			asoc->default_flowlabel = 0;
1095 		}
1096 	}
1097 #endif
1098 	asoc->sb_send_resv = 0;
1099 	if (override_tag) {
1100 		asoc->my_vtag = override_tag;
1101 	} else {
1102 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1103 	}
1104 	/* Get the nonce tags */
1105 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1106 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1107 	asoc->vrf_id = vrf_id;
1108 
1109 #ifdef SCTP_ASOCLOG_OF_TSNS
1110 	asoc->tsn_in_at = 0;
1111 	asoc->tsn_out_at = 0;
1112 	asoc->tsn_in_wrapped = 0;
1113 	asoc->tsn_out_wrapped = 0;
1114 	asoc->cumack_log_at = 0;
1115 	asoc->cumack_log_atsnt = 0;
1116 #endif
1117 #ifdef SCTP_FS_SPEC_LOG
1118 	asoc->fs_index = 0;
1119 #endif
1120 	asoc->refcnt = 0;
1121 	asoc->assoc_up_sent = 0;
1122 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1123 	    sctp_select_initial_TSN(&inp->sctp_ep);
1124 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1125 	/* we are optimisitic here */
1126 	asoc->peer_supports_nat = 0;
1127 	asoc->sent_queue_retran_cnt = 0;
1128 
1129 	/* for CMT */
1130 	asoc->last_net_cmt_send_started = NULL;
1131 
1132 	/* This will need to be adjusted */
1133 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1134 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1135 	asoc->asconf_seq_in = asoc->last_acked_seq;
1136 
1137 	/* here we are different, we hold the next one we expect */
1138 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1139 
1140 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1141 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1142 
1143 	asoc->default_mtu = inp->sctp_ep.default_mtu;
1144 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1145 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1146 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1147 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1148 	asoc->free_chunk_cnt = 0;
1149 
1150 	asoc->iam_blocking = 0;
1151 	asoc->context = inp->sctp_context;
1152 	asoc->local_strreset_support = inp->local_strreset_support;
1153 	asoc->def_send = inp->def_send;
1154 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1155 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1156 	asoc->pr_sctp_cnt = 0;
1157 	asoc->total_output_queue_size = 0;
1158 
1159 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1160 		asoc->scope.ipv6_addr_legal = 1;
1161 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1162 			asoc->scope.ipv4_addr_legal = 1;
1163 		} else {
1164 			asoc->scope.ipv4_addr_legal = 0;
1165 		}
1166 	} else {
1167 		asoc->scope.ipv6_addr_legal = 0;
1168 		asoc->scope.ipv4_addr_legal = 1;
1169 	}
1170 
1171 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1172 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1173 
1174 	asoc->smallest_mtu = inp->sctp_frag_point;
1175 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1176 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1177 
1178 	asoc->stream_locked_on = 0;
1179 	asoc->ecn_echo_cnt_onq = 0;
1180 	asoc->stream_locked = 0;
1181 
1182 	asoc->send_sack = 1;
1183 
1184 	LIST_INIT(&asoc->sctp_restricted_addrs);
1185 
1186 	TAILQ_INIT(&asoc->nets);
1187 	TAILQ_INIT(&asoc->pending_reply_queue);
1188 	TAILQ_INIT(&asoc->asconf_ack_sent);
1189 	/* Setup to fill the hb random cache at first HB */
1190 	asoc->hb_random_idx = 4;
1191 
1192 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1193 
1194 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1195 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1196 
1197 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1198 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1199 
1200 	/*
1201 	 * Now the stream parameters, here we allocate space for all streams
1202 	 * that we request by default.
1203 	 */
1204 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1205 	    o_strms;
1206 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1207 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1208 	    SCTP_M_STRMO);
1209 	if (asoc->strmout == NULL) {
1210 		/* big trouble no memory */
1211 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1212 		return (ENOMEM);
1213 	}
1214 	for (i = 0; i < asoc->streamoutcnt; i++) {
1215 		/*
1216 		 * inbound side must be set to 0xffff, also NOTE when we get
1217 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1218 		 * count (streamoutcnt) but first check if we sent to any of
1219 		 * the upper streams that were dropped (if some were). Those
1220 		 * that were dropped must be notified to the upper layer as
1221 		 * failed to send.
1222 		 */
1223 		asoc->strmout[i].next_mid_ordered = 0;
1224 		asoc->strmout[i].next_mid_unordered = 0;
1225 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1226 		asoc->strmout[i].chunks_on_queues = 0;
1227 #if defined(SCTP_DETAILED_STR_STATS)
1228 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1229 			asoc->strmout[i].abandoned_sent[j] = 0;
1230 			asoc->strmout[i].abandoned_unsent[j] = 0;
1231 		}
1232 #else
1233 		asoc->strmout[i].abandoned_sent[0] = 0;
1234 		asoc->strmout[i].abandoned_unsent[0] = 0;
1235 #endif
1236 		asoc->strmout[i].sid = i;
1237 		asoc->strmout[i].last_msg_incomplete = 0;
1238 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1239 		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1240 	}
1241 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1242 
1243 	/* Now the mapping array */
1244 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1245 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1246 	    SCTP_M_MAP);
1247 	if (asoc->mapping_array == NULL) {
1248 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1249 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1250 		return (ENOMEM);
1251 	}
1252 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1253 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1254 	    SCTP_M_MAP);
1255 	if (asoc->nr_mapping_array == NULL) {
1256 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1257 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1258 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1259 		return (ENOMEM);
1260 	}
1261 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1262 
1263 	/* Now the init of the other outqueues */
1264 	TAILQ_INIT(&asoc->free_chunks);
1265 	TAILQ_INIT(&asoc->control_send_queue);
1266 	TAILQ_INIT(&asoc->asconf_send_queue);
1267 	TAILQ_INIT(&asoc->send_queue);
1268 	TAILQ_INIT(&asoc->sent_queue);
1269 	TAILQ_INIT(&asoc->resetHead);
1270 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1271 	TAILQ_INIT(&asoc->asconf_queue);
1272 	/* authentication fields */
1273 	asoc->authinfo.random = NULL;
1274 	asoc->authinfo.active_keyid = 0;
1275 	asoc->authinfo.assoc_key = NULL;
1276 	asoc->authinfo.assoc_keyid = 0;
1277 	asoc->authinfo.recv_key = NULL;
1278 	asoc->authinfo.recv_keyid = 0;
1279 	LIST_INIT(&asoc->shared_keys);
1280 	asoc->marked_retrans = 0;
1281 	asoc->port = inp->sctp_ep.port;
1282 	asoc->timoinit = 0;
1283 	asoc->timodata = 0;
1284 	asoc->timosack = 0;
1285 	asoc->timoshutdown = 0;
1286 	asoc->timoheartbeat = 0;
1287 	asoc->timocookie = 0;
1288 	asoc->timoshutdownack = 0;
1289 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1290 	asoc->discontinuity_time = asoc->start_time;
1291 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1292 		asoc->abandoned_unsent[i] = 0;
1293 		asoc->abandoned_sent[i] = 0;
1294 	}
1295 	/*
1296 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1297 	 * freed later when the association is freed.
1298 	 */
1299 	return (0);
1300 }
1301 
1302 void
1303 sctp_print_mapping_array(struct sctp_association *asoc)
1304 {
1305 	unsigned int i, limit;
1306 
1307 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1308 	    asoc->mapping_array_size,
1309 	    asoc->mapping_array_base_tsn,
1310 	    asoc->cumulative_tsn,
1311 	    asoc->highest_tsn_inside_map,
1312 	    asoc->highest_tsn_inside_nr_map);
1313 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1314 		if (asoc->mapping_array[limit - 1] != 0) {
1315 			break;
1316 		}
1317 	}
1318 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1319 	for (i = 0; i < limit; i++) {
1320 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1321 	}
1322 	if (limit % 16)
1323 		SCTP_PRINTF("\n");
1324 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1325 		if (asoc->nr_mapping_array[limit - 1]) {
1326 			break;
1327 		}
1328 	}
1329 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1330 	for (i = 0; i < limit; i++) {
1331 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1332 	}
1333 	if (limit % 16)
1334 		SCTP_PRINTF("\n");
1335 }
1336 
1337 int
1338 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1339 {
1340 	/* mapping array needs to grow */
1341 	uint8_t *new_array1, *new_array2;
1342 	uint32_t new_size;
1343 
1344 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1345 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1346 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1347 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1348 		/* can't get more, forget it */
1349 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1350 		if (new_array1) {
1351 			SCTP_FREE(new_array1, SCTP_M_MAP);
1352 		}
1353 		if (new_array2) {
1354 			SCTP_FREE(new_array2, SCTP_M_MAP);
1355 		}
1356 		return (-1);
1357 	}
1358 	memset(new_array1, 0, new_size);
1359 	memset(new_array2, 0, new_size);
1360 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1361 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1362 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1363 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1364 	asoc->mapping_array = new_array1;
1365 	asoc->nr_mapping_array = new_array2;
1366 	asoc->mapping_array_size = new_size;
1367 	return (0);
1368 }
1369 
1370 
/*
 * Core of the association iterator: walk endpoints (and the associations
 * hanging off each) under the INP-info read lock, invoking the caller's
 * per-endpoint and per-association callbacks.  Periodically drops and
 * reacquires the locks (after SCTP_ITERATOR_MAX_AT_ONCE associations) so
 * other threads can make progress; external code can request early
 * termination via sctp_it_ctl.iterator_flags.  Frees 'it' when done.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	struct epoch_tracker et;
	struct sctp_inpcb *tinp;
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;

	NET_EPOCH_ENTER(et);
	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		/* drop the reference taken when the iterator was queued */
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		NET_EPOCH_EXIT(et);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* the endpoint is already read-locked from above */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}

		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1524 
1525 void
1526 sctp_iterator_worker(void)
1527 {
1528 	struct sctp_iterator *it;
1529 
1530 	/* This function is called with the WQ lock in place */
1531 	sctp_it_ctl.iterator_running = 1;
1532 	while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
1533 		/* now lets work on this one */
1534 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1535 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1536 		CURVNET_SET(it->vn);
1537 		sctp_iterator_work(it);
1538 		CURVNET_RESTORE();
1539 		SCTP_IPI_ITERATOR_WQ_LOCK();
1540 		/* sa_ignore FREED_MEMORY */
1541 	}
1542 	sctp_it_ctl.iterator_running = 0;
1543 	return;
1544 }
1545 
1546 
1547 static void
1548 sctp_handle_addr_wq(void)
1549 {
1550 	/* deal with the ADDR wq from the rtsock calls */
1551 	struct sctp_laddr *wi, *nwi;
1552 	struct sctp_asconf_iterator *asc;
1553 
1554 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1555 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1556 	if (asc == NULL) {
1557 		/* Try later, no memory */
1558 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1559 		    (struct sctp_inpcb *)NULL,
1560 		    (struct sctp_tcb *)NULL,
1561 		    (struct sctp_nets *)NULL);
1562 		return;
1563 	}
1564 	LIST_INIT(&asc->list_of_work);
1565 	asc->cnt = 0;
1566 
1567 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1568 		LIST_REMOVE(wi, sctp_nxt_addr);
1569 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1570 		asc->cnt++;
1571 	}
1572 
1573 	if (asc->cnt == 0) {
1574 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1575 	} else {
1576 		int ret;
1577 
1578 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1579 		    sctp_asconf_iterator_stcb,
1580 		    NULL,	/* No ep end for boundall */
1581 		    SCTP_PCB_FLAGS_BOUNDALL,
1582 		    SCTP_PCB_ANY_FEATURES,
1583 		    SCTP_ASOC_ANY_STATE,
1584 		    (void *)asc, 0,
1585 		    sctp_asconf_iterator_end, NULL, 0);
1586 		if (ret) {
1587 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1588 			/*
1589 			 * Freeing if we are stopping or put back on the
1590 			 * addr_wq.
1591 			 */
1592 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1593 				sctp_asconf_iterator_end(asc, 0);
1594 			} else {
1595 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1596 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1597 				}
1598 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1599 			}
1600 		}
1601 	}
1602 }
1603 
1604 /*-
1605  * The following table shows which pointers for the inp, stcb, or net are
1606  * stored for each timer after it was started.
1607  *
1608  *|Name                         |Timer                        |inp |stcb|net |
1609  *|-----------------------------|-----------------------------|----|----|----|
1610  *|SCTP_TIMER_TYPE_SEND         |net->rxt_timer               |Yes |Yes |Yes |
1611  *|SCTP_TIMER_TYPE_INIT         |net->rxt_timer               |Yes |Yes |Yes |
1612  *|SCTP_TIMER_TYPE_RECV         |stcb->asoc.dack_timer        |Yes |Yes |No  |
1613  *|SCTP_TIMER_TYPE_SHUTDOWN     |net->rxt_timer               |Yes |Yes |Yes |
1614  *|SCTP_TIMER_TYPE_HEARTBEAT    |net->hb_timer                |Yes |Yes |Yes |
1615  *|SCTP_TIMER_TYPE_COOKIE       |net->rxt_timer               |Yes |Yes |Yes |
1616  *|SCTP_TIMER_TYPE_NEWCOOKIE    |inp->sctp_ep.signature_change|Yes |No  |No  |
1617  *|SCTP_TIMER_TYPE_PATHMTURAISE |net->pmtu_timer              |Yes |Yes |Yes |
1618  *|SCTP_TIMER_TYPE_SHUTDOWNACK  |net->rxt_timer               |Yes |Yes |Yes |
1619  *|SCTP_TIMER_TYPE_ASCONF       |stcb->asoc.asconf_timer      |Yes |Yes |Yes |
1620  *|SCTP_TIMER_TYPE_SHUTDOWNGUARD|stcb->asoc.shut_guard_timer  |Yes |Yes |No  |
1621  *|SCTP_TIMER_TYPE_AUTOCLOSE    |stcb->asoc.autoclose_timer   |Yes |Yes |No  |
1622  *|SCTP_TIMER_TYPE_STRRESET     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
1623  *|SCTP_TIMER_TYPE_INPKILL      |inp->sctp_ep.signature_change|Yes |No  |No  |
1624  *|SCTP_TIMER_TYPE_ASOCKILL     |stcb->asoc.strreset_timer    |Yes |Yes |No  |
1625  *|SCTP_TIMER_TYPE_ADDR_WQ      |SCTP_BASE_INFO(addr_wq_timer)|No  |No  |No  |
1626  *|SCTP_TIMER_TYPE_PRIM_DELETED |stcb->asoc.delete_prim_timer |Yes |Yes |No  |
1627  */
1628 
1629 void
1630 sctp_timeout_handler(void *t)
1631 {
1632 	struct epoch_tracker et;
1633 	struct timeval tv;
1634 	struct sctp_inpcb *inp;
1635 	struct sctp_tcb *stcb;
1636 	struct sctp_nets *net;
1637 	struct sctp_timer *tmr;
1638 	struct mbuf *op_err;
1639 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1640 	struct socket *so;
1641 #endif
1642 	int did_output;
1643 	int type;
1644 	int i, secret;
1645 
1646 	tmr = (struct sctp_timer *)t;
1647 	inp = (struct sctp_inpcb *)tmr->ep;
1648 	stcb = (struct sctp_tcb *)tmr->tcb;
1649 	net = (struct sctp_nets *)tmr->net;
1650 	CURVNET_SET((struct vnet *)tmr->vnet);
1651 	did_output = 1;
1652 
1653 #ifdef SCTP_AUDITING_ENABLED
1654 	sctp_audit_log(0xF0, (uint8_t)tmr->type);
1655 	sctp_auditing(3, inp, stcb, net);
1656 #endif
1657 
1658 	/* sanity checks... */
1659 	KASSERT(tmr->self == tmr, ("tmr->self corrupted"));
1660 	KASSERT(SCTP_IS_TIMER_TYPE_VALID(tmr->type), ("Invalid timer type %d", tmr->type));
1661 	type = tmr->type;
1662 	tmr->stopped_from = 0xa001;
1663 	if (inp) {
1664 		SCTP_INP_INCR_REF(inp);
1665 		if ((inp->sctp_socket == NULL) &&
1666 		    ((type != SCTP_TIMER_TYPE_INPKILL) &&
1667 		    (type != SCTP_TIMER_TYPE_INIT) &&
1668 		    (type != SCTP_TIMER_TYPE_SEND) &&
1669 		    (type != SCTP_TIMER_TYPE_RECV) &&
1670 		    (type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1671 		    (type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1672 		    (type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1673 		    (type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1674 		    (type != SCTP_TIMER_TYPE_ASOCKILL))) {
1675 			SCTP_INP_DECR_REF(inp);
1676 			CURVNET_RESTORE();
1677 			SCTPDBG(SCTP_DEBUG_TIMER2,
1678 			    "Timer type = %d handler exiting due to closed socket\n",
1679 			    type);
1680 			return;
1681 		}
1682 	}
1683 	tmr->stopped_from = 0xa002;
1684 	if (stcb) {
1685 		atomic_add_int(&stcb->asoc.refcnt, 1);
1686 		if (stcb->asoc.state == 0) {
1687 			atomic_add_int(&stcb->asoc.refcnt, -1);
1688 			if (inp) {
1689 				SCTP_INP_DECR_REF(inp);
1690 			}
1691 			CURVNET_RESTORE();
1692 			SCTPDBG(SCTP_DEBUG_TIMER2,
1693 			    "Timer type = %d handler exiting due to CLOSED association\n",
1694 			    type);
1695 			return;
1696 		}
1697 	}
1698 	tmr->stopped_from = 0xa003;
1699 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
1700 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1701 		if (inp) {
1702 			SCTP_INP_DECR_REF(inp);
1703 		}
1704 		if (stcb) {
1705 			atomic_add_int(&stcb->asoc.refcnt, -1);
1706 		}
1707 		CURVNET_RESTORE();
1708 		SCTPDBG(SCTP_DEBUG_TIMER2,
1709 		    "Timer type = %d handler exiting due to not being active\n",
1710 		    type);
1711 		return;
1712 	}
1713 	tmr->stopped_from = 0xa004;
1714 
1715 	if (stcb) {
1716 		SCTP_TCB_LOCK(stcb);
1717 		atomic_add_int(&stcb->asoc.refcnt, -1);
1718 		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1719 		    ((stcb->asoc.state == 0) ||
1720 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1721 			SCTP_TCB_UNLOCK(stcb);
1722 			if (inp) {
1723 				SCTP_INP_DECR_REF(inp);
1724 			}
1725 			CURVNET_RESTORE();
1726 			SCTPDBG(SCTP_DEBUG_TIMER2,
1727 			    "Timer type = %d handler exiting due to CLOSED association\n",
1728 			    type);
1729 			return;
1730 		}
1731 	} else if (inp != NULL) {
1732 		SCTP_INP_WLOCK(inp);
1733 	} else {
1734 		SCTP_WQ_ADDR_LOCK();
1735 	}
1736 	/* record in stopped what t-o occurred */
1737 	tmr->stopped_from = type;
1738 
1739 	NET_EPOCH_ENTER(et);
1740 	/* mark as being serviced now */
1741 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1742 		/*
1743 		 * Callout has been rescheduled.
1744 		 */
1745 		goto get_out;
1746 	}
1747 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1748 		/*
1749 		 * Not active, so no action.
1750 		 */
1751 		goto get_out;
1752 	}
1753 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1754 
1755 	/* call the handler for the appropriate timer type */
1756 	switch (type) {
1757 	case SCTP_TIMER_TYPE_SEND:
1758 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1759 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1760 		    type, inp, stcb, net));
1761 		SCTP_STAT_INCR(sctps_timodata);
1762 		stcb->asoc.timodata++;
1763 		stcb->asoc.num_send_timers_up--;
1764 		if (stcb->asoc.num_send_timers_up < 0) {
1765 			stcb->asoc.num_send_timers_up = 0;
1766 		}
1767 		SCTP_TCB_LOCK_ASSERT(stcb);
1768 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1769 			/* no need to unlock on tcb its gone */
1770 
1771 			goto out_decr;
1772 		}
1773 		SCTP_TCB_LOCK_ASSERT(stcb);
1774 #ifdef SCTP_AUDITING_ENABLED
1775 		sctp_auditing(4, inp, stcb, net);
1776 #endif
1777 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1778 		if ((stcb->asoc.num_send_timers_up == 0) &&
1779 		    (stcb->asoc.sent_queue_cnt > 0)) {
1780 			struct sctp_tmit_chunk *chk;
1781 
1782 			/*
1783 			 * safeguard. If there on some on the sent queue
1784 			 * somewhere but no timers running something is
1785 			 * wrong... so we start a timer on the first chunk
1786 			 * on the send queue on whatever net it is sent to.
1787 			 */
1788 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1789 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1790 			    chk->whoTo);
1791 		}
1792 		break;
1793 	case SCTP_TIMER_TYPE_INIT:
1794 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1795 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1796 		    type, inp, stcb, net));
1797 		SCTP_STAT_INCR(sctps_timoinit);
1798 		stcb->asoc.timoinit++;
1799 		if (sctp_t1init_timer(inp, stcb, net)) {
1800 			/* no need to unlock on tcb its gone */
1801 			goto out_decr;
1802 		}
1803 		/* We do output but not here */
1804 		did_output = 0;
1805 		break;
1806 	case SCTP_TIMER_TYPE_RECV:
1807 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
1808 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1809 		    type, inp, stcb, net));
1810 		SCTP_STAT_INCR(sctps_timosack);
1811 		stcb->asoc.timosack++;
1812 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1813 #ifdef SCTP_AUDITING_ENABLED
1814 		sctp_auditing(4, inp, stcb, NULL);
1815 #endif
1816 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1817 		break;
1818 	case SCTP_TIMER_TYPE_SHUTDOWN:
1819 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1820 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1821 		    type, inp, stcb, net));
1822 		SCTP_STAT_INCR(sctps_timoshutdown);
1823 		stcb->asoc.timoshutdown++;
1824 		if (sctp_shutdown_timer(inp, stcb, net)) {
1825 			/* no need to unlock on tcb its gone */
1826 			goto out_decr;
1827 		}
1828 #ifdef SCTP_AUDITING_ENABLED
1829 		sctp_auditing(4, inp, stcb, net);
1830 #endif
1831 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1832 		break;
1833 	case SCTP_TIMER_TYPE_HEARTBEAT:
1834 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1835 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1836 		    type, inp, stcb, net));
1837 		SCTP_STAT_INCR(sctps_timoheartbeat);
1838 		stcb->asoc.timoheartbeat++;
1839 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1840 			/* no need to unlock on tcb its gone */
1841 			goto out_decr;
1842 		}
1843 #ifdef SCTP_AUDITING_ENABLED
1844 		sctp_auditing(4, inp, stcb, net);
1845 #endif
1846 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1847 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1848 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1849 		}
1850 		break;
1851 	case SCTP_TIMER_TYPE_COOKIE:
1852 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1853 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1854 		    type, inp, stcb, net));
1855 		SCTP_STAT_INCR(sctps_timocookie);
1856 		stcb->asoc.timocookie++;
1857 		if (sctp_cookie_timer(inp, stcb, net)) {
1858 			/* no need to unlock on tcb its gone */
1859 			goto out_decr;
1860 		}
1861 #ifdef SCTP_AUDITING_ENABLED
1862 		sctp_auditing(4, inp, stcb, net);
1863 #endif
1864 		/*
1865 		 * We consider T3 and Cookie timer pretty much the same with
1866 		 * respect to where from in chunk_output.
1867 		 */
1868 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1869 		break;
1870 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1871 		KASSERT(inp != NULL && stcb == NULL && net == NULL,
1872 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1873 		    type, inp, stcb, net));
1874 		SCTP_STAT_INCR(sctps_timosecret);
1875 		(void)SCTP_GETTIME_TIMEVAL(&tv);
1876 		inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1877 		inp->sctp_ep.last_secret_number =
1878 		    inp->sctp_ep.current_secret_number;
1879 		inp->sctp_ep.current_secret_number++;
1880 		if (inp->sctp_ep.current_secret_number >=
1881 		    SCTP_HOW_MANY_SECRETS) {
1882 			inp->sctp_ep.current_secret_number = 0;
1883 		}
1884 		secret = (int)inp->sctp_ep.current_secret_number;
1885 		for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1886 			inp->sctp_ep.secret_key[secret][i] =
1887 			    sctp_select_initial_TSN(&inp->sctp_ep);
1888 		}
1889 		sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);
1890 		did_output = 0;
1891 		break;
1892 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1893 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1894 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1895 		    type, inp, stcb, net));
1896 		SCTP_STAT_INCR(sctps_timopathmtu);
1897 		sctp_pathmtu_timer(inp, stcb, net);
1898 		did_output = 0;
1899 		break;
1900 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1901 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1902 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1903 		    type, inp, stcb, net));
1904 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1905 			/* no need to unlock on tcb its gone */
1906 			goto out_decr;
1907 		}
1908 		SCTP_STAT_INCR(sctps_timoshutdownack);
1909 		stcb->asoc.timoshutdownack++;
1910 #ifdef SCTP_AUDITING_ENABLED
1911 		sctp_auditing(4, inp, stcb, net);
1912 #endif
1913 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1914 		break;
1915 	case SCTP_TIMER_TYPE_ASCONF:
1916 		KASSERT(inp != NULL && stcb != NULL && net != NULL,
1917 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1918 		    type, inp, stcb, net));
1919 		SCTP_STAT_INCR(sctps_timoasconf);
1920 		if (sctp_asconf_timer(inp, stcb, net)) {
1921 			/* no need to unlock on tcb its gone */
1922 			goto out_decr;
1923 		}
1924 #ifdef SCTP_AUDITING_ENABLED
1925 		sctp_auditing(4, inp, stcb, net);
1926 #endif
1927 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1928 		break;
1929 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1930 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
1931 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1932 		    type, inp, stcb, net));
1933 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1934 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1935 		    "Shutdown guard timer expired");
1936 		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
1937 		/* no need to unlock on tcb its gone */
1938 		goto out_decr;
1939 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1940 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
1941 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1942 		    type, inp, stcb, net));
1943 		SCTP_STAT_INCR(sctps_timoautoclose);
1944 		sctp_autoclose_timer(inp, stcb);
1945 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1946 		did_output = 0;
1947 		break;
1948 	case SCTP_TIMER_TYPE_STRRESET:
1949 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
1950 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1951 		    type, inp, stcb, net));
1952 		SCTP_STAT_INCR(sctps_timostrmrst);
1953 		if (sctp_strreset_timer(inp, stcb)) {
1954 			/* no need to unlock on tcb its gone */
1955 			goto out_decr;
1956 		}
1957 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1958 		break;
1959 	case SCTP_TIMER_TYPE_INPKILL:
1960 		KASSERT(inp != NULL && stcb == NULL && net == NULL,
1961 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1962 		    type, inp, stcb, net));
1963 		SCTP_STAT_INCR(sctps_timoinpkill);
1964 		/*
1965 		 * special case, take away our increment since WE are the
1966 		 * killer
1967 		 */
1968 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
1969 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1970 		SCTP_INP_DECR_REF(inp);
1971 		SCTP_INP_WUNLOCK(inp);
1972 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1973 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1974 		inp = NULL;
1975 		goto out_no_decr;
1976 	case SCTP_TIMER_TYPE_ASOCKILL:
1977 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
1978 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
1979 		    type, inp, stcb, net));
1980 		SCTP_STAT_INCR(sctps_timoassockill);
1981 		/* Can we free it yet? */
1982 		SCTP_INP_DECR_REF(inp);
1983 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
1984 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1985 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1986 		so = SCTP_INP_SO(inp);
1987 		atomic_add_int(&stcb->asoc.refcnt, 1);
1988 		SCTP_TCB_UNLOCK(stcb);
1989 		SCTP_SOCKET_LOCK(so, 1);
1990 		SCTP_TCB_LOCK(stcb);
1991 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1992 #endif
1993 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1994 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1995 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1996 		SCTP_SOCKET_UNLOCK(so, 1);
1997 #endif
1998 		/*
1999 		 * free asoc, always unlocks (or destroy's) so prevent
2000 		 * duplicate unlock or unlock of a free mtx :-0
2001 		 */
2002 		stcb = NULL;
2003 		goto out_no_decr;
2004 	case SCTP_TIMER_TYPE_ADDR_WQ:
2005 		KASSERT(inp == NULL && stcb == NULL && net == NULL,
2006 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2007 		    type, inp, stcb, net));
2008 		sctp_handle_addr_wq();
2009 		break;
2010 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2011 		KASSERT(inp != NULL && stcb != NULL && net == NULL,
2012 		    ("timeout of type %d: inp = %p, stcb = %p, net = %p",
2013 		    type, inp, stcb, net));
2014 		SCTP_STAT_INCR(sctps_timodelprim);
2015 		sctp_delete_prim_timer(inp, stcb);
2016 		break;
2017 	default:
2018 #ifdef INVARIANTS
2019 		panic("Unknown timer type %d", type);
2020 #else
2021 		goto get_out;
2022 #endif
2023 	}
2024 #ifdef SCTP_AUDITING_ENABLED
2025 	sctp_audit_log(0xF1, (uint8_t)type);
2026 	if (inp)
2027 		sctp_auditing(5, inp, stcb, net);
2028 #endif
2029 	if ((did_output) && stcb) {
2030 		/*
2031 		 * Now we need to clean up the control chunk chain if an
2032 		 * ECNE is on it. It must be marked as UNSENT again so next
2033 		 * call will continue to send it until such time that we get
2034 		 * a CWR, to remove it. It is, however, less likely that we
2035 		 * will find a ecn echo on the chain though.
2036 		 */
2037 		sctp_fix_ecn_echo(&stcb->asoc);
2038 	}
2039 get_out:
2040 	if (stcb) {
2041 		SCTP_TCB_UNLOCK(stcb);
2042 	} else if (inp != NULL) {
2043 		SCTP_INP_WUNLOCK(inp);
2044 	} else {
2045 		SCTP_WQ_ADDR_UNLOCK();
2046 	}
2047 
2048 out_decr:
2049 	if (inp) {
2050 		SCTP_INP_DECR_REF(inp);
2051 	}
2052 
2053 out_no_decr:
2054 	SCTPDBG(SCTP_DEBUG_TIMER2, "Timer type = %d handler finished\n", type);
2055 	CURVNET_RESTORE();
2056 	NET_EPOCH_EXIT(et);
2057 }
2058 
2059 /*-
2060  * The following table shows which parameters must be provided
2061  * when calling sctp_timer_start(). For parameters not being
2062  * provided, NULL must be used.
2063  *
2064  * |Name                         |inp |stcb|net |
2065  * |-----------------------------|----|----|----|
2066  * |SCTP_TIMER_TYPE_SEND         |Yes |Yes |Yes |
2067  * |SCTP_TIMER_TYPE_INIT         |Yes |Yes |Yes |
2068  * |SCTP_TIMER_TYPE_RECV         |Yes |Yes |No  |
2069  * |SCTP_TIMER_TYPE_SHUTDOWN     |Yes |Yes |Yes |
2070  * |SCTP_TIMER_TYPE_HEARTBEAT    |Yes |Yes |Yes |
2071  * |SCTP_TIMER_TYPE_COOKIE       |Yes |Yes |Yes |
2072  * |SCTP_TIMER_TYPE_NEWCOOKIE    |Yes |No  |No  |
2073  * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes |
2074  * |SCTP_TIMER_TYPE_SHUTDOWNACK  |Yes |Yes |Yes |
2075  * |SCTP_TIMER_TYPE_ASCONF       |Yes |Yes |Yes |
2076  * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No  |
2077  * |SCTP_TIMER_TYPE_AUTOCLOSE    |Yes |Yes |No  |
2078  * |SCTP_TIMER_TYPE_STRRESET     |Yes |Yes |Yes |
2079  * |SCTP_TIMER_TYPE_INPKILL      |Yes |No  |No  |
2080  * |SCTP_TIMER_TYPE_ASOCKILL     |Yes |Yes |No  |
2081  * |SCTP_TIMER_TYPE_ADDR_WQ      |No  |No  |No  |
2082  * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No  |
2083  *
2084  */
2085 
/*
 * Start a timer of the given type, unless it is already running.
 * The inp/stcb/net arguments required for each timer type are listed
 * in the table above; the caller must hold the lock corresponding to
 * the most specific argument provided (stcb, inp, or the address
 * work-queue lock when all three are NULL).
 */
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_timer *tmr;	/* timer object selected per type below */
	uint32_t to_ticks;	/* timeout duration, in ticks */
	uint32_t rndval, jitter;

	tmr = NULL;
	to_ticks = 0;
	/* Assert that the caller holds the appropriate lock. */
	if (stcb != NULL) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	} else if (inp != NULL) {
		SCTP_INP_WLOCK_ASSERT(inp);
	} else {
		SCTP_WQ_ADDR_LOCK_ASSERT();
	}
	if (stcb != NULL) {
		/*
		 * Don't restart timer on association that's about to be
		 * killed.
		 */
		if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) &&
		    (t_type != SCTP_TIMER_TYPE_ASOCKILL)) {
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "timer type %d not started: inp=%p, stcb=%p, net=%p (stcb deleted).\n",
			    t_type, inp, stcb, net);
			return;
		}
		/* Don't restart timer on net that's been removed. */
		if (net != NULL && (net->dest_state & SCTP_ADDR_BEING_DELETED)) {
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "timer type %d not started: inp=%p, stcb=%p, net=%p (net deleted).\n",
			    t_type, inp, stcb, net);
			return;
		}
	}
	/*
	 * Each case selects the timer object (tmr) and computes the
	 * timeout (to_ticks) for the requested timer type.
	 */
	switch (t_type) {
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer. */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			/* No RTT measured yet; fall back to the initial RTO. */
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default usually about 1
		 * second.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp,
		 * usually about 200 ms.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * The net is used here so that we can add in the RTO. Even
		 * though we use a different timer. We also add the HB timer
		 * PLUS a random jitter.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		/*
		 * No heartbeat on a net with HB disabled, unless the
		 * address still needs to be confirmed.
		 */
		if ((net->dest_state & SCTP_ADDR_NOHB) &&
		    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "timer type %d not started: inp=%p, stcb=%p, net=%p.\n",
			    t_type, inp, stcb, net);
			return;
		}
		tmr = &net->hb_timer;
		/* Base value in ms here; converted to ticks below. */
		if (net->RTO == 0) {
			to_ticks = stcb->asoc.initial_rto;
		} else {
			to_ticks = net->RTO;
		}
		/* Apply a random jitter of up to +/- 50% of the base value. */
		rndval = sctp_select_initial_TSN(&inp->sctp_ep);
		jitter = rndval % to_ticks;
		if (jitter >= (to_ticks >> 1)) {
			to_ticks = to_ticks + (jitter - (to_ticks >> 1));
		} else {
			to_ticks = to_ticks - jitter;
		}
		if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
		    !(net->dest_state & SCTP_ADDR_PF)) {
			to_ticks += net->heart_beat_delay;
		}
		/*
		 * Now we must convert the to_ticks that are now in ms to
		 * ticks.
		 */
		to_ticks = MSEC_TO_TICKS(to_ticks);
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since one
		 * RTT was complete. If a retransmission happened then we
		 * will be using the RTO initial value.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * Nothing needed but the endpoint here usually about 60
		 * minutes.
		 */
		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTUD,
		 * usually about 10 minutes.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		/* Skip nets on which path MTU discovery is disabled. */
		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
			SCTPDBG(SCTP_DEBUG_TIMER2,
			    "timer type %d not started: inp=%p, stcb=%p, net=%p.\n",
			    t_type, inp, stcb, net);
			return;
		}
		tmr = &net->pmtu_timer;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination. */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.asconf_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer usually
		 * about 3 minutes.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.shut_guard_timer;
		/* If no guard time is configured, use 5 times the max RTO. */
		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
		} else {
			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		}
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.autoclose_timer;
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.strreset_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		/* Re-uses the stream reset timer slot in the asoc. */
		tmr = &stcb->asoc.strreset_timer;
		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		if ((inp != NULL) || (stcb != NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_start of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.delete_prim_timer;
		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		break;
	default:
#ifdef INVARIANTS
		panic("Unknown timer type %d", t_type);
#else
		return;
#endif
	}
	KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type));
	KASSERT(to_ticks > 0, ("to_ticks == 0 for timer type %d", t_type));
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * We do NOT allow you to have it already running. If it is,
		 * we leave the current one up unchanged.
		 */
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "timer type %d already running: inp=%p, stcb=%p, net=%p.\n",
		    t_type, inp, stcb, net);
		return;
	}
	/* At this point we can proceed. */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		stcb->asoc.num_send_timers_up++;
	}
	/* Record the owning objects for the timeout handler. */
	tmr->stopped_from = 0;
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	if (t_type == SCTP_TIMER_TYPE_STRRESET) {
		/*
		 * Although the duration came from the net's RTO, the
		 * stream reset timeout handler runs with net == NULL,
		 * so don't record the net here.
		 */
		tmr->net = NULL;
	} else {
		tmr->net = (void *)net;
	}
	tmr->self = (void *)tmr;
	tmr->vnet = (void *)curvnet;
	tmr->ticks = sctp_get_tick_count();
	if (SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr) == 0) {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "timer type %d started: ticks=%u, inp=%p, stcb=%p, net=%p.\n",
		    t_type, to_ticks, inp, stcb, net);
	} else {
		/*
		 * This should not happen, since we checked for pending
		 * above.
		 */
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "timer type %d restarted: ticks=%u, inp=%p, stcb=%p, net=%p.\n",
		    t_type, to_ticks, inp, stcb, net);
	}
	return;
}
2490 
2491 /*-
2492  * The following table shows which parameters must be provided
2493  * when calling sctp_timer_stop(). For parameters not being
2494  * provided, NULL must be used.
2495  *
2496  * |Name                         |inp |stcb|net |
2497  * |-----------------------------|----|----|----|
2498  * |SCTP_TIMER_TYPE_SEND         |Yes |Yes |Yes |
2499  * |SCTP_TIMER_TYPE_INIT         |Yes |Yes |Yes |
2500  * |SCTP_TIMER_TYPE_RECV         |Yes |Yes |No  |
2501  * |SCTP_TIMER_TYPE_SHUTDOWN     |Yes |Yes |Yes |
2502  * |SCTP_TIMER_TYPE_HEARTBEAT    |Yes |Yes |Yes |
2503  * |SCTP_TIMER_TYPE_COOKIE       |Yes |Yes |Yes |
2504  * |SCTP_TIMER_TYPE_NEWCOOKIE    |Yes |No  |No  |
2505  * |SCTP_TIMER_TYPE_PATHMTURAISE |Yes |Yes |Yes |
2506  * |SCTP_TIMER_TYPE_SHUTDOWNACK  |Yes |Yes |Yes |
2507  * |SCTP_TIMER_TYPE_ASCONF       |Yes |Yes |No  |
2508  * |SCTP_TIMER_TYPE_SHUTDOWNGUARD|Yes |Yes |No  |
2509  * |SCTP_TIMER_TYPE_AUTOCLOSE    |Yes |Yes |No  |
2510  * |SCTP_TIMER_TYPE_STRRESET     |Yes |Yes |No  |
2511  * |SCTP_TIMER_TYPE_INPKILL      |Yes |No  |No  |
2512  * |SCTP_TIMER_TYPE_ASOCKILL     |Yes |Yes |No  |
2513  * |SCTP_TIMER_TYPE_ADDR_WQ      |No  |No  |No  |
2514  * |SCTP_TIMER_TYPE_PRIM_DELETED |Yes |Yes |No  |
2515  *
2516  */
2517 
/*
 * Stop a timer of the given type, if it is the one currently running.
 * The inp/stcb/net arguments required for each timer type are listed
 * in the table above; the caller must hold the lock corresponding to
 * the most specific argument provided (stcb, inp, or the address
 * work-queue lock when all three are NULL). "from" records the call
 * site for debugging (stored in tmr->stopped_from).
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Assert that the caller holds the appropriate lock. */
	if (stcb != NULL) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	} else if (inp != NULL) {
		SCTP_INP_WLOCK_ASSERT(inp);
	} else {
		SCTP_WQ_ADDR_LOCK_ASSERT();
	}
	tmr = NULL;
	/* Select the timer object this timer type runs on. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_SEND:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/* Stopped with net == NULL, unlike sctp_timer_start(). */
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		if ((inp == NULL) || (stcb != NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		/* Shares the stream reset timer slot in the asoc. */
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		if ((inp != NULL) || (stcb != NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((inp == NULL) || (stcb == NULL) || (net != NULL)) {
#ifdef INVARIANTS
			panic("sctp_timer_stop of type %d: inp = %p, stcb = %p, net = %p",
			    t_type, inp, stcb, net);
#else
			return;
#endif
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	default:
#ifdef INVARIANTS
		panic("Unknown timer type %d", t_type);
#else
		return;
#endif
	}
	KASSERT(tmr != NULL, ("tmr is NULL for timer type %d", t_type));
	if ((tmr->type != SCTP_TIMER_TYPE_NONE) &&
	    (tmr->type != t_type)) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "shared timer type %d not running: inp=%p, stcb=%p, net=%p.\n",
		    t_type, inp, stcb, net);
		return;
	}
	/* Keep the send timer accounting in step (never below zero). */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	if (SCTP_OS_TIMER_STOP(&tmr->timer) == 1) {
		/* The timer was actually stopped; verify its ownership. */
		KASSERT(tmr->ep == inp,
		    ("sctp_timer_stop of type %d: inp = %p, tmr->inp = %p",
		    t_type, inp, tmr->ep));
		KASSERT(tmr->tcb == stcb,
		    ("sctp_timer_stop of type %d: stcb = %p, tmr->stcb = %p",
		    t_type, stcb, tmr->tcb));
		/*
		 * The ASCONF timer records a net when started but is
		 * stopped with net == NULL, so it is exempted from the
		 * exact-match check.
		 */
		KASSERT(((t_type == SCTP_TIMER_TYPE_ASCONF) && (tmr->net != NULL)) ||
		    ((t_type != SCTP_TIMER_TYPE_ASCONF) && (tmr->net == net)),
		    ("sctp_timer_stop of type %d: net = %p, tmr->net = %p",
		    t_type, net, tmr->net));
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "timer type %d stopped: inp=%p, stcb=%p, net=%p.\n",
		    t_type, inp, stcb, net);
		tmr->ep = NULL;
		tmr->tcb = NULL;
		tmr->net = NULL;
	} else {
		SCTPDBG(SCTP_DEBUG_TIMER2,
		    "timer type %d not stopped: inp=%p, stcb=%p, net=%p.\n",
		    t_type, inp, stcb, net);
	}
	return;
}
2778 
2779 uint32_t
2780 sctp_calculate_len(struct mbuf *m)
2781 {
2782 	uint32_t tlen = 0;
2783 	struct mbuf *at;
2784 
2785 	at = m;
2786 	while (at) {
2787 		tlen += SCTP_BUF_LEN(at);
2788 		at = SCTP_BUF_NEXT(at);
2789 	}
2790 	return (tlen);
2791 }
2792 
2793 void
2794 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2795     struct sctp_association *asoc, uint32_t mtu)
2796 {
2797 	/*
2798 	 * Reset the P-MTU size on this association, this involves changing
2799 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2800 	 * allow the DF flag to be cleared.
2801 	 */
2802 	struct sctp_tmit_chunk *chk;
2803 	unsigned int eff_mtu, ovh;
2804 
2805 	asoc->smallest_mtu = mtu;
2806 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2807 		ovh = SCTP_MIN_OVERHEAD;
2808 	} else {
2809 		ovh = SCTP_MIN_V4_OVERHEAD;
2810 	}
2811 	eff_mtu = mtu - ovh;
2812 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2813 		if (chk->send_size > eff_mtu) {
2814 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2815 		}
2816 	}
2817 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2818 		if (chk->send_size > eff_mtu) {
2819 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2820 		}
2821 	}
2822 }
2823 
2824 
2825 /*
2826  * Given an association and starting time of the current RTT period, update
2827  * RTO in number of msecs. net should point to the current network.
2828  * Return 1, if an RTO update was performed, return 0 if no update was
2829  * performed due to invalid starting point.
2830  */
2831 
2832 int
2833 sctp_calculate_rto(struct sctp_tcb *stcb,
2834     struct sctp_association *asoc,
2835     struct sctp_nets *net,
2836     struct timeval *old,
2837     int rtt_from_sack)
2838 {
2839 	struct timeval now;
2840 	uint64_t rtt_us;	/* RTT in us */
2841 	int32_t rtt;		/* RTT in ms */
2842 	uint32_t new_rto;
2843 	int first_measure = 0;
2844 
2845 	/************************/
2846 	/* 1. calculate new RTT */
2847 	/************************/
2848 	/* get the current time */
2849 	if (stcb->asoc.use_precise_time) {
2850 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2851 	} else {
2852 		(void)SCTP_GETTIME_TIMEVAL(&now);
2853 	}
2854 	if ((old->tv_sec > now.tv_sec) ||
2855 	    ((old->tv_sec == now.tv_sec) && (old->tv_sec > now.tv_sec))) {
2856 		/* The starting point is in the future. */
2857 		return (0);
2858 	}
2859 	timevalsub(&now, old);
2860 	rtt_us = (uint64_t)1000000 * (uint64_t)now.tv_sec + (uint64_t)now.tv_usec;
2861 	if (rtt_us > SCTP_RTO_UPPER_BOUND * 1000) {
2862 		/* The RTT is larger than a sane value. */
2863 		return (0);
2864 	}
2865 	/* store the current RTT in us */
2866 	net->rtt = rtt_us;
2867 	/* compute rtt in ms */
2868 	rtt = (int32_t)(net->rtt / 1000);
2869 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2870 		/*
2871 		 * Tell the CC module that a new update has just occurred
2872 		 * from a sack
2873 		 */
2874 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2875 	}
2876 	/*
2877 	 * Do we need to determine the lan? We do this only on sacks i.e.
2878 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2879 	 */
2880 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2881 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2882 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2883 			net->lan_type = SCTP_LAN_INTERNET;
2884 		} else {
2885 			net->lan_type = SCTP_LAN_LOCAL;
2886 		}
2887 	}
2888 
2889 	/***************************/
2890 	/* 2. update RTTVAR & SRTT */
2891 	/***************************/
2892 	/*-
2893 	 * Compute the scaled average lastsa and the
2894 	 * scaled variance lastsv as described in van Jacobson
2895 	 * Paper "Congestion Avoidance and Control", Annex A.
2896 	 *
2897 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2898 	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
2899 	 */
2900 	if (net->RTO_measured) {
2901 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2902 		net->lastsa += rtt;
2903 		if (rtt < 0) {
2904 			rtt = -rtt;
2905 		}
2906 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2907 		net->lastsv += rtt;
2908 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2909 			rto_logging(net, SCTP_LOG_RTTVAR);
2910 		}
2911 	} else {
2912 		/* First RTO measurment */
2913 		net->RTO_measured = 1;
2914 		first_measure = 1;
2915 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2916 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2917 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2918 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2919 		}
2920 	}
2921 	if (net->lastsv == 0) {
2922 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2923 	}
2924 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2925 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2926 	    (stcb->asoc.sat_network_lockout == 0)) {
2927 		stcb->asoc.sat_network = 1;
2928 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2929 		stcb->asoc.sat_network = 0;
2930 		stcb->asoc.sat_network_lockout = 1;
2931 	}
2932 	/* bound it, per C6/C7 in Section 5.3.1 */
2933 	if (new_rto < stcb->asoc.minrto) {
2934 		new_rto = stcb->asoc.minrto;
2935 	}
2936 	if (new_rto > stcb->asoc.maxrto) {
2937 		new_rto = stcb->asoc.maxrto;
2938 	}
2939 	net->RTO = new_rto;
2940 	return (1);
2941 }
2942 
2943 /*
2944  * return a pointer to a contiguous piece of data from the given mbuf chain
2945  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2946  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
 * is >= 'len'. Returns NULL if there aren't 'len' bytes in the chain.
2948  */
2949 caddr_t
2950 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
2951 {
2952 	uint32_t count;
2953 	uint8_t *ptr;
2954 
2955 	ptr = in_ptr;
2956 	if ((off < 0) || (len <= 0))
2957 		return (NULL);
2958 
2959 	/* find the desired start location */
2960 	while ((m != NULL) && (off > 0)) {
2961 		if (off < SCTP_BUF_LEN(m))
2962 			break;
2963 		off -= SCTP_BUF_LEN(m);
2964 		m = SCTP_BUF_NEXT(m);
2965 	}
2966 	if (m == NULL)
2967 		return (NULL);
2968 
2969 	/* is the current mbuf large enough (eg. contiguous)? */
2970 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2971 		return (mtod(m, caddr_t)+off);
2972 	} else {
2973 		/* else, it spans more than one mbuf, so save a temp copy... */
2974 		while ((m != NULL) && (len > 0)) {
2975 			count = min(SCTP_BUF_LEN(m) - off, len);
2976 			memcpy(ptr, mtod(m, caddr_t)+off, count);
2977 			len -= count;
2978 			ptr += count;
2979 			off = 0;
2980 			m = SCTP_BUF_NEXT(m);
2981 		}
2982 		if ((m == NULL) && (len > 0))
2983 			return (NULL);
2984 		else
2985 			return ((caddr_t)in_ptr);
2986 	}
2987 }
2988 
2989 
2990 
2991 struct sctp_paramhdr *
2992 sctp_get_next_param(struct mbuf *m,
2993     int offset,
2994     struct sctp_paramhdr *pull,
2995     int pull_limit)
2996 {
2997 	/* This just provides a typed signature to Peter's Pull routine */
2998 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2999 	    (uint8_t *)pull));
3000 }
3001 
3002 
3003 struct mbuf *
3004 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
3005 {
3006 	struct mbuf *m_last;
3007 	caddr_t dp;
3008 
3009 	if (padlen > 3) {
3010 		return (NULL);
3011 	}
3012 	if (padlen <= M_TRAILINGSPACE(m)) {
3013 		/*
3014 		 * The easy way. We hope the majority of the time we hit
3015 		 * here :)
3016 		 */
3017 		m_last = m;
3018 	} else {
3019 		/* Hard way we must grow the mbuf chain */
3020 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
3021 		if (m_last == NULL) {
3022 			return (NULL);
3023 		}
3024 		SCTP_BUF_LEN(m_last) = 0;
3025 		SCTP_BUF_NEXT(m_last) = NULL;
3026 		SCTP_BUF_NEXT(m) = m_last;
3027 	}
3028 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
3029 	SCTP_BUF_LEN(m_last) += padlen;
3030 	memset(dp, 0, padlen);
3031 	return (m_last);
3032 }
3033 
3034 struct mbuf *
3035 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
3036 {
3037 	/* find the last mbuf in chain and pad it */
3038 	struct mbuf *m_at;
3039 
3040 	if (last_mbuf != NULL) {
3041 		return (sctp_add_pad_tombuf(last_mbuf, padval));
3042 	} else {
3043 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3044 			if (SCTP_BUF_NEXT(m_at) == NULL) {
3045 				return (sctp_add_pad_tombuf(m_at, padval));
3046 			}
3047 		}
3048 	}
3049 	return (NULL);
3050 }
3051 
/*
 * Queue an SCTP_ASSOC_CHANGE notification on the socket's read queue and,
 * for 1-to-1 style sockets whose association just failed, record an error
 * code on the socket and wake any sleepers.
 *
 * state:     SCTP_COMM_UP / SCTP_COMM_LOST / SCTP_RESTART / ...
 * error:     value reported in sac_error.
 * abort:     ABORT chunk to append to the notification, may be NULL.
 * from_peer: nonzero when the association ended due to the peer.
 * so_locked: whether the caller already holds the socket lock (only
 *            meaningful on platforms using the lock dance below).
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t abort_len;
	unsigned int i;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	if (stcb == NULL) {
		return;
	}
	/* Only build the notification if the application enabled it. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
			/*
			 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
			 * contiguous.
			 */
			if (abort_len > SCTP_CHUNK_BUFFER_SIZE) {
				abort_len = SCTP_CHUNK_BUFFER_SIZE;
			}
		} else {
			abort_len = 0;
		}
		/* Reserve room for the optional trailing sac_info payload. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				/* Out of mbufs: skip the event, still set so_error. */
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		/*
		 * If the larger allocation succeeded, append either the
		 * supported-features list (COMM_UP/RESTART) or the ABORT
		 * chunk (COMM_LOST/CANT_STR_ASSOC) in sac_info.
		 */
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				if (stcb->asoc.idata_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			control->spec_flags = M_NOTIFICATION;
			/* not that we need this */
			control->tail_mbuf = m_notify;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up an error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			if (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) {
				/* Peer rejected the association during setup. */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
				/* Local failure during setup is reported as a timeout. */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
		SOCK_UNLOCK(stcb->sctp_socket);
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Platforms with a per-socket lock: drop the TCB lock before
	 * taking the socket lock, holding a refcount so the assoc cannot
	 * disappear in between; bail out if the socket closed meanwhile.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3212 
/*
 * Queue an SCTP_PEER_ADDR_CHANGE notification reporting that peer address
 * 'sa' changed to 'state' (with 'error' as the cause).  No-op unless the
 * application enabled SCTP_PCB_FLAGS_RECVPADDREVNT.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address into spc_aaddr in the form the user expects. */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		/* Report as a v4-mapped IPv6 address if the socket wants that. */
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			/* Normalize link-local scope representation for userland. */
			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3304 
3305 
/*
 * Queue an SCTP_SEND_FAILED (deprecated) or SCTP_SEND_FAILED_EVENT
 * notification for a DATA chunk that will not be delivered.  The chunk's
 * data mbufs are stolen (chk->data is set to NULL) and appended to the
 * notification after stripping the DATA/I-DATA chunk header and padding.
 *
 * sent: nonzero if the chunk had already been put on the wire
 *       (SCTP_DATA_SENT vs. SCTP_DATA_UNSENT).
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	struct sctp_chunkhdr *chkhdr;
	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}

	/* New-style event takes precedence over the deprecated one. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	if (stcb->asoc.idata_supported) {
		chkhdr_len = sizeof(struct sctp_idata_chunk);
	} else {
		chkhdr_len = sizeof(struct sctp_data_chunk);
	}
	/* Use some defaults in case we can't access the chunk header */
	if (chk->send_size >= chkhdr_len) {
		payload_len = chk->send_size - chkhdr_len;
	} else {
		payload_len = 0;
	}
	padding_len = 0;
	if (chk->data != NULL) {
		/* Refine payload/padding sizes from the actual chunk header. */
		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
		if (chkhdr != NULL) {
			chk_len = ntohs(chkhdr->chunk_length);
			if ((chk_len >= chkhdr_len) &&
			    (chk->send_size >= chk_len) &&
			    (chk->send_size - chk_len < 4)) {
				padding_len = chk->send_size - chk_len;
				payload_len = chk->send_size - chkhdr_len - padding_len;
			}
		}
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		/* Fill in the new-style sctp_send_failed_event. */
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		/* Fill in the deprecated sctp_send_failed layout. */
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	if (chk->data != NULL) {
		/* Trim off the sctp chunk header (it should be there) */
		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
			m_adj(chk->data, chkhdr_len);
			m_adj(chk->data, -padding_len);
			sctp_mbuf_crush(chk->data);
			chk->send_size -= (chkhdr_len + padding_len);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3440 
3441 
/*
 * Like sctp_notify_send_failed(), but for a stream-queue pending message
 * (never turned into a chunk, hence always SCTP_DATA_UNSENT).  The
 * message's data mbufs are stolen (sp->data is set to NULL) and appended
 * to the notification.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int notifhdr_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* New-style event takes precedence over the deprecated one. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		/* Fill in the new-style sctp_send_failed_event. */
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->sid;
		/* some_taken: part of the message was already chunked off. */
		if (sp->some_taken) {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		/* Fill in the deprecated sctp_send_failed layout. */
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->sid;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3541 
3542 
3543 
3544 static void
3545 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3546 {
3547 	struct mbuf *m_notify;
3548 	struct sctp_adaptation_event *sai;
3549 	struct sctp_queued_to_read *control;
3550 
3551 	if ((stcb == NULL) ||
3552 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3553 		/* event not enabled */
3554 		return;
3555 	}
3556 
3557 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3558 	if (m_notify == NULL)
3559 		/* no space left */
3560 		return;
3561 	SCTP_BUF_LEN(m_notify) = 0;
3562 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3563 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3564 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3565 	sai->sai_flags = 0;
3566 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3567 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3568 	sai->sai_assoc_id = sctp_get_associd(stcb);
3569 
3570 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3571 	SCTP_BUF_NEXT(m_notify) = NULL;
3572 
3573 	/* append to socket */
3574 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3575 	    0, 0, stcb->asoc.context, 0, 0, 0,
3576 	    m_notify);
3577 	if (control == NULL) {
3578 		/* no memory */
3579 		sctp_m_freem(m_notify);
3580 		return;
3581 	}
3582 	control->length = SCTP_BUF_LEN(m_notify);
3583 	control->spec_flags = M_NOTIFICATION;
3584 	/* not that we need this */
3585 	control->tail_mbuf = m_notify;
3586 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3587 	    control,
3588 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3589 }
3590 
3591 /* This always must be called with the read-queue LOCKED in the INP */
/*
 * Queue an SCTP_PARTIAL_DELIVERY_EVENT notification.  Unlike the other
 * notify routines, this inserts the entry into the endpoint read queue
 * directly (right after the in-progress partial-delivery entry when one
 * exists), so the caller must hold the INP read-queue lock.
 *
 * val packs the stream number in the upper 16 bits and the sequence
 * number in the lower 16 bits.
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* Socket can no longer be read from: nothing to deliver. */
		return;
	}

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* Unpack stream (high 16 bits) and sequence (low 16 bits). */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	/* Account the notification against the receive socket buffer. */
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	control->end_added = 1;
	/* Keep ordering: place the event right after the PD-API entry. */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Per-socket-lock platforms: drop the TCB lock before
		 * taking the socket lock, holding a refcount meanwhile;
		 * bail out if the socket disappeared.
		 */
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3684 
/*
 * Handle completion of a SHUTDOWN: for 1-to-1 style sockets mark the
 * socket as unable to send and wake writers, then (if enabled) queue an
 * SCTP_SHUTDOWN_EVENT notification on the read queue.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Per-socket-lock platforms: drop the TCB lock before
		 * taking the socket lock, holding a refcount meanwhile;
		 * bail out if the socket closed in between.
		 */
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3754 
3755 static void
3756 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3757     int so_locked
3758 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3759     SCTP_UNUSED
3760 #endif
3761 )
3762 {
3763 	struct mbuf *m_notify;
3764 	struct sctp_sender_dry_event *event;
3765 	struct sctp_queued_to_read *control;
3766 
3767 	if ((stcb == NULL) ||
3768 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3769 		/* event not enabled */
3770 		return;
3771 	}
3772 
3773 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3774 	if (m_notify == NULL) {
3775 		/* no space left */
3776 		return;
3777 	}
3778 	SCTP_BUF_LEN(m_notify) = 0;
3779 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3780 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3781 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3782 	event->sender_dry_flags = 0;
3783 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3784 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3785 
3786 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3787 	SCTP_BUF_NEXT(m_notify) = NULL;
3788 
3789 	/* append to socket */
3790 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3791 	    0, 0, stcb->asoc.context, 0, 0, 0,
3792 	    m_notify);
3793 	if (control == NULL) {
3794 		/* no memory */
3795 		sctp_m_freem(m_notify);
3796 		return;
3797 	}
3798 	control->length = SCTP_BUF_LEN(m_notify);
3799 	control->spec_flags = M_NOTIFICATION;
3800 	/* not that we need this */
3801 	control->tail_mbuf = m_notify;
3802 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3803 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3804 }
3805 
3806 
3807 void
3808 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3809 {
3810 	struct mbuf *m_notify;
3811 	struct sctp_queued_to_read *control;
3812 	struct sctp_stream_change_event *stradd;
3813 
3814 	if ((stcb == NULL) ||
3815 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3816 		/* event not enabled */
3817 		return;
3818 	}
3819 	if ((stcb->asoc.peer_req_out) && flag) {
3820 		/* Peer made the request, don't tell the local user */
3821 		stcb->asoc.peer_req_out = 0;
3822 		return;
3823 	}
3824 	stcb->asoc.peer_req_out = 0;
3825 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3826 	if (m_notify == NULL)
3827 		/* no space left */
3828 		return;
3829 	SCTP_BUF_LEN(m_notify) = 0;
3830 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3831 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3832 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3833 	stradd->strchange_flags = flag;
3834 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3835 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3836 	stradd->strchange_instrms = numberin;
3837 	stradd->strchange_outstrms = numberout;
3838 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3839 	SCTP_BUF_NEXT(m_notify) = NULL;
3840 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3841 		/* no space */
3842 		sctp_m_freem(m_notify);
3843 		return;
3844 	}
3845 	/* append to socket */
3846 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3847 	    0, 0, stcb->asoc.context, 0, 0, 0,
3848 	    m_notify);
3849 	if (control == NULL) {
3850 		/* no memory */
3851 		sctp_m_freem(m_notify);
3852 		return;
3853 	}
3854 	control->length = SCTP_BUF_LEN(m_notify);
3855 	control->spec_flags = M_NOTIFICATION;
3856 	/* not that we need this */
3857 	control->tail_mbuf = m_notify;
3858 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3859 	    control,
3860 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3861 }
3862 
3863 void
3864 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3865 {
3866 	struct mbuf *m_notify;
3867 	struct sctp_queued_to_read *control;
3868 	struct sctp_assoc_reset_event *strasoc;
3869 
3870 	if ((stcb == NULL) ||
3871 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3872 		/* event not enabled */
3873 		return;
3874 	}
3875 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3876 	if (m_notify == NULL)
3877 		/* no space left */
3878 		return;
3879 	SCTP_BUF_LEN(m_notify) = 0;
3880 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3881 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3882 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3883 	strasoc->assocreset_flags = flag;
3884 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3885 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3886 	strasoc->assocreset_local_tsn = sending_tsn;
3887 	strasoc->assocreset_remote_tsn = recv_tsn;
3888 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3889 	SCTP_BUF_NEXT(m_notify) = NULL;
3890 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3891 		/* no space */
3892 		sctp_m_freem(m_notify);
3893 		return;
3894 	}
3895 	/* append to socket */
3896 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3897 	    0, 0, stcb->asoc.context, 0, 0, 0,
3898 	    m_notify);
3899 	if (control == NULL) {
3900 		/* no memory */
3901 		sctp_m_freem(m_notify);
3902 		return;
3903 	}
3904 	control->length = SCTP_BUF_LEN(m_notify);
3905 	control->spec_flags = M_NOTIFICATION;
3906 	/* not that we need this */
3907 	control->tail_mbuf = m_notify;
3908 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3909 	    control,
3910 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3911 }
3912 
3913 
3914 
3915 static void
3916 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3917     int number_entries, uint16_t *list, int flag)
3918 {
3919 	struct mbuf *m_notify;
3920 	struct sctp_queued_to_read *control;
3921 	struct sctp_stream_reset_event *strreset;
3922 	int len;
3923 
3924 	if ((stcb == NULL) ||
3925 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3926 		/* event not enabled */
3927 		return;
3928 	}
3929 
3930 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3931 	if (m_notify == NULL)
3932 		/* no space left */
3933 		return;
3934 	SCTP_BUF_LEN(m_notify) = 0;
3935 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3936 	if (len > M_TRAILINGSPACE(m_notify)) {
3937 		/* never enough room */
3938 		sctp_m_freem(m_notify);
3939 		return;
3940 	}
3941 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3942 	memset(strreset, 0, len);
3943 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3944 	strreset->strreset_flags = flag;
3945 	strreset->strreset_length = len;
3946 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3947 	if (number_entries) {
3948 		int i;
3949 
3950 		for (i = 0; i < number_entries; i++) {
3951 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3952 		}
3953 	}
3954 	SCTP_BUF_LEN(m_notify) = len;
3955 	SCTP_BUF_NEXT(m_notify) = NULL;
3956 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3957 		/* no space */
3958 		sctp_m_freem(m_notify);
3959 		return;
3960 	}
3961 	/* append to socket */
3962 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3963 	    0, 0, stcb->asoc.context, 0, 0, 0,
3964 	    m_notify);
3965 	if (control == NULL) {
3966 		/* no memory */
3967 		sctp_m_freem(m_notify);
3968 		return;
3969 	}
3970 	control->length = SCTP_BUF_LEN(m_notify);
3971 	control->spec_flags = M_NOTIFICATION;
3972 	/* not that we need this */
3973 	control->tail_mbuf = m_notify;
3974 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3975 	    control,
3976 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3977 }
3978 
3979 
3980 static void
3981 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3982 {
3983 	struct mbuf *m_notify;
3984 	struct sctp_remote_error *sre;
3985 	struct sctp_queued_to_read *control;
3986 	unsigned int notif_len;
3987 	uint16_t chunk_len;
3988 
3989 	if ((stcb == NULL) ||
3990 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3991 		return;
3992 	}
3993 	if (chunk != NULL) {
3994 		chunk_len = ntohs(chunk->ch.chunk_length);
3995 		/*
3996 		 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
3997 		 * contiguous.
3998 		 */
3999 		if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) {
4000 			chunk_len = SCTP_CHUNK_BUFFER_SIZE;
4001 		}
4002 	} else {
4003 		chunk_len = 0;
4004 	}
4005 	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
4006 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
4007 	if (m_notify == NULL) {
4008 		/* Retry with smaller value. */
4009 		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
4010 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
4011 		if (m_notify == NULL) {
4012 			return;
4013 		}
4014 	}
4015 	SCTP_BUF_NEXT(m_notify) = NULL;
4016 	sre = mtod(m_notify, struct sctp_remote_error *);
4017 	memset(sre, 0, notif_len);
4018 	sre->sre_type = SCTP_REMOTE_ERROR;
4019 	sre->sre_flags = 0;
4020 	sre->sre_length = sizeof(struct sctp_remote_error);
4021 	sre->sre_error = error;
4022 	sre->sre_assoc_id = sctp_get_associd(stcb);
4023 	if (notif_len > sizeof(struct sctp_remote_error)) {
4024 		memcpy(sre->sre_data, chunk, chunk_len);
4025 		sre->sre_length += chunk_len;
4026 	}
4027 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
4028 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
4029 	    0, 0, stcb->asoc.context, 0, 0, 0,
4030 	    m_notify);
4031 	if (control != NULL) {
4032 		control->length = SCTP_BUF_LEN(m_notify);
4033 		control->spec_flags = M_NOTIFICATION;
4034 		/* not that we need this */
4035 		control->tail_mbuf = m_notify;
4036 		sctp_add_to_readq(stcb->sctp_ep, stcb,
4037 		    control,
4038 		    &stcb->sctp_socket->so_rcv, 1,
4039 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
4040 	} else {
4041 		sctp_m_freem(m_notify);
4042 	}
4043 }
4044 
4045 
/*
 * Central notification dispatcher: translate an internal notification
 * code into the matching socket-level event builder.  'data' carries a
 * notification-specific payload (a net, a chunk, a pending send, a
 * stream-id list, or a key id encoded in the pointer, depending on the
 * case below).  Events are suppressed when the socket is gone, the
 * reader side is shut down, or (for interface events) while still in
 * the handshake states.
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		/* Receive side closed; nobody would see the event. */
		return;
	}
	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* COMM_UP is reported at most once per association. */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.auth_supported == 0) {
			/* Recurse to also report that the peer does no AUTH. */
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			/* 'data' is the affected sctp_nets. */
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		/* 'data' is the pending stream-queue entry that failed. */
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		/* 'data' is a chunk that had already been sent. */
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		/* 'data' is a chunk that was never sent. */
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			/* 'data' points at a uint32_t indication value. */
			uint32_t val;

			val = *((uint32_t *)data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		/* During the handshake an abort means the setup failed. */
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	/*
	 * For the stream-reset cases, 'error' is the number of entries
	 * and 'data' the stream-id list handed to the builder.
	 */
	case SCTP_NOTIFY_STR_RESET_SEND:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		/* 'data' is the sockaddr being reported. */
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	/* For AUTH events the key id travels encoded in the data pointer. */
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __func__, notification, notification);
		break;
	}			/* end switch */
}
4225 
/*
 * Flush every chunk from the sent queue, the send queue and each
 * per-stream output queue, notifying the ULP of every failed datagram
 * and releasing the associated memory.  'holds_lock' is non-zero when
 * the caller already owns the TCB send lock; otherwise it is taken
 * (and released) here.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket gone or closed; nothing to report */
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			/* keep the per-stream queued-chunk count in sync */
			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* re-check: the notify path may have taken chk->data */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* never made it onto the wire */
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* re-check: the notify path may have taken chk->data */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			/* tell the stream scheduler the entry is gone */
			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				/* re-check: the notify path may have taken sp->data */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
4337 
4338 void
4339 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
4340     struct sctp_abort_chunk *abort, int so_locked
4341 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4342     SCTP_UNUSED
4343 #endif
4344 )
4345 {
4346 	if (stcb == NULL) {
4347 		return;
4348 	}
4349 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
4350 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4351 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
4352 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
4353 	}
4354 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4355 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4356 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
4357 		return;
4358 	}
4359 	/* Tell them we lost the asoc */
4360 	sctp_report_all_outbound(stcb, error, 0, so_locked);
4361 	if (from_peer) {
4362 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
4363 	} else {
4364 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
4365 	}
4366 }
4367 
/*
 * Send an ABORT for the given packet/association and, when a TCB
 * exists, notify the ULP and free the association.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB: abort with the peer's vtag and its VRF. */
		vtag = stcb->asoc.peer_vtag;
		vrf_id = stcb->asoc.vrf_id;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    mflowtype, mflowid, inp->fibnum,
	    vrf_id, port);
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Hold a reference while the TCB lock is dropped so the
		 * socket lock can be taken, then re-acquire the TCB lock.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			/* association was counted as established */
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
4414 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the in/out TSN tracking logs of an association.
 * The whole body is compiled out unless NOSIY_PRINTS is defined.
 *
 * NOTE(review): "NOSIY_PRINTS" looks like a typo of "NOISY_PRINTS";
 * kept as-is since builds may define the misspelled name — confirm
 * before renaming.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* When wrapped, the oldest entries sit from tsn_in_at to the end. */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
4475 #endif
4476 
/*
 * Abort an existing association: send an ABORT chunk to the peer
 * (carrying op_err), notify the ULP unless the socket is gone, and
 * free the association.  With no TCB, only a dangling socket-gone
 * inp is cleaned up.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_WAS_ABORTED);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		/* association was counted as established */
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Hold a reference while the TCB lock is dropped so the socket
	 * lock can be taken, then re-acquire the TCB lock.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4537 
4538 void
4539 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4540     struct sockaddr *src, struct sockaddr *dst,
4541     struct sctphdr *sh, struct sctp_inpcb *inp,
4542     struct mbuf *cause,
4543     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4544     uint32_t vrf_id, uint16_t port)
4545 {
4546 	struct sctp_chunkhdr *ch, chunk_buf;
4547 	unsigned int chk_length;
4548 	int contains_init_chunk;
4549 
4550 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4551 	/* Generate a TO address for future reference */
4552 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4553 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4554 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4555 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4556 		}
4557 	}
4558 	contains_init_chunk = 0;
4559 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4560 	    sizeof(*ch), (uint8_t *)&chunk_buf);
4561 	while (ch != NULL) {
4562 		chk_length = ntohs(ch->chunk_length);
4563 		if (chk_length < sizeof(*ch)) {
4564 			/* break to abort land */
4565 			break;
4566 		}
4567 		switch (ch->chunk_type) {
4568 		case SCTP_INIT:
4569 			contains_init_chunk = 1;
4570 			break;
4571 		case SCTP_PACKET_DROPPED:
4572 			/* we don't respond to pkt-dropped */
4573 			return;
4574 		case SCTP_ABORT_ASSOCIATION:
4575 			/* we don't respond with an ABORT to an ABORT */
4576 			return;
4577 		case SCTP_SHUTDOWN_COMPLETE:
4578 			/*
4579 			 * we ignore it since we are not waiting for it and
4580 			 * peer is gone
4581 			 */
4582 			return;
4583 		case SCTP_SHUTDOWN_ACK:
4584 			sctp_send_shutdown_complete2(src, dst, sh,
4585 			    mflowtype, mflowid, fibnum,
4586 			    vrf_id, port);
4587 			return;
4588 		default:
4589 			break;
4590 		}
4591 		offset += SCTP_SIZE32(chk_length);
4592 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4593 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4594 	}
4595 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4596 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4597 	    (contains_init_chunk == 0))) {
4598 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4599 		    mflowtype, mflowid, fibnum,
4600 		    vrf_id, port);
4601 	}
4602 }
4603 
4604 /*
4605  * check the inbound datagram to make sure there is not an abort inside it,
4606  * if there is return 1, else return 0.
4607  */
4608 int
4609 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
4610 {
4611 	struct sctp_chunkhdr *ch;
4612 	struct sctp_init_chunk *init_chk, chunk_buf;
4613 	int offset;
4614 	unsigned int chk_length;
4615 
4616 	offset = iphlen + sizeof(struct sctphdr);
4617 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4618 	    (uint8_t *)&chunk_buf);
4619 	while (ch != NULL) {
4620 		chk_length = ntohs(ch->chunk_length);
4621 		if (chk_length < sizeof(*ch)) {
4622 			/* packet is probably corrupt */
4623 			break;
4624 		}
4625 		/* we seem to be ok, is it an abort? */
4626 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4627 			/* yep, tell them */
4628 			return (1);
4629 		}
4630 		if (ch->chunk_type == SCTP_INITIATION) {
4631 			/* need to update the Vtag */
4632 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4633 			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
4634 			if (init_chk != NULL) {
4635 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4636 			}
4637 		}
4638 		/* Nope, move to the next chunk */
4639 		offset += SCTP_SIZE32(chk_length);
4640 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4641 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4642 	}
4643 	return (0);
4644 }
4645 
/*
 * Currently (2/02), ifa_addr embeds the scope_id and doesn't have
 * sin6_scope_id set (i.e. it's 0), so this function exists to compare
 * link-local scopes.
 */
4650 #ifdef INET6
uint32_t
sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
{
	/*
	 * Compare the (possibly embedded) link-local scopes of two IPv6
	 * addresses: 1 if the scopes match, 0 if not or if a scope cannot
	 * be recovered.
	 */
	struct sockaddr_in6 lhs, rhs;

	/* work on copies so the callers' sockaddrs stay untouched */
	lhs = *addr1;
	rhs = *addr2;

	if ((lhs.sin6_scope_id == 0) && (sa6_recoverscope(&lhs) != 0)) {
		/* can't get scope, so can't match */
		return (0);
	}
	if ((rhs.sin6_scope_id == 0) && (sa6_recoverscope(&rhs) != 0)) {
		/* can't get scope, so can't match */
		return (0);
	}
	return ((lhs.sin6_scope_id == rhs.sin6_scope_id) ? 1 : 0);
}
4675 
4676 /*
4677  * returns a sockaddr_in6 with embedded scope recovered and removed
4678  */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/*
	 * Return a sockaddr_in6 with any embedded scope recovered into
	 * sin6_scope_id and stripped from the address bytes.  *store is
	 * caller-provided scratch; the return value is either addr itself
	 * or store.
	 */
	if (addr->sin6_family != AF_INET6) {
		return (addr);
	}
	if (!IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
		/* not link-local: nothing to recover or strip */
		return (addr);
	}
	if (addr->sin6_scope_id == 0) {
		*store = *addr;
		if (sa6_recoverscope(store) == 0) {
			/* use the recovered scope */
			return (store);
		}
		/* recovery failed: hand back the original */
		return (addr);
	}
	/* scope id already set: clear the embedded copy in the address */
	in6_clearscope(&addr->sin6_addr);
	return (addr);
}
4699 #endif
4700 
4701 /*
4702  * are the two addresses the same?  currently a "scopeless" check returns: 1
4703  * if same, 0 if not
4704  */
4705 int
4706 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4707 {
4708 
4709 	/* must be valid */
4710 	if (sa1 == NULL || sa2 == NULL)
4711 		return (0);
4712 
4713 	/* must be the same family */
4714 	if (sa1->sa_family != sa2->sa_family)
4715 		return (0);
4716 
4717 	switch (sa1->sa_family) {
4718 #ifdef INET6
4719 	case AF_INET6:
4720 		{
4721 			/* IPv6 addresses */
4722 			struct sockaddr_in6 *sin6_1, *sin6_2;
4723 
4724 			sin6_1 = (struct sockaddr_in6 *)sa1;
4725 			sin6_2 = (struct sockaddr_in6 *)sa2;
4726 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4727 			    sin6_2));
4728 		}
4729 #endif
4730 #ifdef INET
4731 	case AF_INET:
4732 		{
4733 			/* IPv4 addresses */
4734 			struct sockaddr_in *sin_1, *sin_2;
4735 
4736 			sin_1 = (struct sockaddr_in *)sa1;
4737 			sin_2 = (struct sockaddr_in *)sa2;
4738 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4739 		}
4740 #endif
4741 	default:
4742 		/* we don't do these... */
4743 		return (0);
4744 	}
4745 }
4746 
void
sctp_print_address(struct sockaddr *sa)
{
	/* Pretty-print an IPv4/IPv6 sockaddr for debugging output. */
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];
#endif

	switch (sa->sa_family) {
#ifdef INET6
	case AF_INET6: {
		struct sockaddr_in6 *s6 = (struct sockaddr_in6 *)sa;

		SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
		    ip6_sprintf(ip6buf, &s6->sin6_addr),
		    ntohs(s6->sin6_port),
		    s6->sin6_scope_id);
		break;
	}
#endif
#ifdef INET
	case AF_INET: {
		struct sockaddr_in *s4 = (struct sockaddr_in *)sa;
		unsigned char *b = (unsigned char *)&s4->sin_addr;

		SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
		    b[0], b[1], b[2], b[3], ntohs(s4->sin_port));
		break;
	}
#endif
	default:
		/* unknown family */
		SCTP_PRINTF("?\n");
		break;
	}
}
4786 
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp.
	 *
	 * Used when an association migrates to a new socket (peeloff or
	 * accept): queued-but-unread data must follow it.  The move is done
	 * in two phases through a private tmp_queue so that we never hold
	 * both inps' read-queue locks at once.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* keep concurrent readers off the old socket while we unhook data */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	/* phase 1: detach this stcb's entries and undo old-socket accounting */
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for our target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* debit each mbuf from the old receive buffer */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	/* phase 2: append to the new read queue and charge the new buffer */
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4862 
void
sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Wake any reader sleeping on inp's socket.  so_locked indicates
	 * whether the caller already holds the socket lock; it only
	 * matters on platforms that take the explicit lock dance below.
	 */
	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(inp);
		if (!so_locked) {
			/*
			 * Drop the TCB lock before taking the socket lock
			 * (lock order), holding a refcnt so the assoc
			 * cannot be freed while unlocked.
			 */
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket went away while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
4901 
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 *
	 * 'end' non-zero marks the message complete (end_added);
	 * 'inp_read_lock_held' tells us whether the caller already holds
	 * the INP read lock so we don't take it twice.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* socket can't be read: drop the data instead of queueing */
		if (!control->on_strm_q) {
			sctp_free_remote_addr(control->whoFrom);
			if (control->data) {
				sctp_m_freem(control->data);
				control->data = NULL;
			}
			sctp_free_a_readq(stcb, control);
		}
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* notifications do not count as user-visible receives */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* walk the chain: free zero-length mbufs, account the rest to sb */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				/* chain ended on a freed mbuf */
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		if (!control->on_strm_q) {
			sctp_free_remote_addr(control->whoFrom);
			sctp_free_a_readq(stcb, control);
		}
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	control->on_read_q = 1;
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* finally, wake any reader waiting on the socket */
	if (inp && inp->sctp_socket) {
		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
	}
}
5003 
5004 /*************HOLD THIS COMMENT FOR PATCH FILE OF
5005  *************ALTERNATE ROUTING CODE
5006  */
5007 
5008 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
5009  *************ALTERNATE ROUTING CODE
5010  */
5011 
5012 struct mbuf *
5013 sctp_generate_cause(uint16_t code, char *info)
5014 {
5015 	struct mbuf *m;
5016 	struct sctp_gen_error_cause *cause;
5017 	size_t info_len;
5018 	uint16_t len;
5019 
5020 	if ((code == 0) || (info == NULL)) {
5021 		return (NULL);
5022 	}
5023 	info_len = strlen(info);
5024 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
5025 		return (NULL);
5026 	}
5027 	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
5028 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
5029 	if (m != NULL) {
5030 		SCTP_BUF_LEN(m) = len;
5031 		cause = mtod(m, struct sctp_gen_error_cause *);
5032 		cause->code = htons(code);
5033 		cause->length = htons(len);
5034 		memcpy(cause->info, info, info_len);
5035 	}
5036 	return (m);
5037 }
5038 
5039 struct mbuf *
5040 sctp_generate_no_user_data_cause(uint32_t tsn)
5041 {
5042 	struct mbuf *m;
5043 	struct sctp_error_no_user_data *no_user_data_cause;
5044 	uint16_t len;
5045 
5046 	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
5047 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
5048 	if (m != NULL) {
5049 		SCTP_BUF_LEN(m) = len;
5050 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
5051 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
5052 		no_user_data_cause->cause.length = htons(len);
5053 		no_user_data_cause->tsn = htonl(tsn);
5054 	}
5055 	return (m);
5056 }
5057 
5058 #ifdef SCTP_MBCNT_LOGGING
5059 void
5060 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
5061     struct sctp_tmit_chunk *tp1, int chk_cnt)
5062 {
5063 	if (tp1->data == NULL) {
5064 		return;
5065 	}
5066 	asoc->chunks_on_out_queue -= chk_cnt;
5067 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
5068 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
5069 		    asoc->total_output_queue_size,
5070 		    tp1->book_size,
5071 		    0,
5072 		    tp1->mbcnt);
5073 	}
5074 	if (asoc->total_output_queue_size >= tp1->book_size) {
5075 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
5076 	} else {
5077 		asoc->total_output_queue_size = 0;
5078 	}
5079 
5080 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
5081 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
5082 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
5083 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
5084 		} else {
5085 			stcb->sctp_socket->so_snd.sb_cc = 0;
5086 
5087 		}
5088 	}
5089 }
5090 
5091 #endif
5092 
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    uint8_t sent, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Abandon the (possibly multi-fragment) message that tp1 belongs
	 * to: free its data, mark its chunks FORWARD_TSN_SKIP, and chase
	 * remaining fragments across the sent queue, the send queue and
	 * finally the stream-out queue.  'sent' non-zero means the message
	 * had been (at least partly) transmitted.  Returns the number of
	 * book-size bytes released.
	 */
	struct sctp_stream_out *strq;
	struct sctp_tmit_chunk *chk = NULL, *tp2;
	struct sctp_stream_queue_pending *sp;
	uint32_t mid;
	uint16_t sid;
	uint8_t foundeom = 0;
	int ret_sz = 0;
	int notdone;
	int do_wakeup_routine = 0;

	sid = tp1->rec.data.sid;
	mid = tp1->rec.data.mid;
	/* book the abandonment in the per-policy statistics */
	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
		stcb->asoc.abandoned_sent[0]++;
		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[sid].abandoned_sent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	} else {
		stcb->asoc.abandoned_unsent[0]++;
		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	}
	/* pass 1: release tp1 and any following fragments on this queue */
	do {
		ret_sz += tp1->book_size;
		if (tp1->data != NULL) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				/* still counted in flight: back it out */
				sctp_flight_size_decrease(tp1);
				sctp_total_flight_decrease(stcb, tp1);
			}
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			stcb->asoc.peers_rwnd += tp1->send_size;
			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			do_wakeup_routine = 1;
			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
				stcb->asoc.sent_queue_cnt_removeable--;
			}
		}
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed we are done   */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * Its a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if (foundeom == 0) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.
		 */
		/* pass 2: continue releasing fragments off the send queue */
		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
			if ((tp1->rec.data.sid != sid) ||
			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
				/* different message: stop */
				break;
			}
			/*
			 * save to chk in case we have some on stream out
			 * queue. If so and we have an un-transmitted one we
			 * don't have to fudge the TSN.
			 */
			chk = tp1;
			ret_sz += tp1->book_size;
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			/* No flight involved here book the size to 0 */
			tp1->book_size = 0;
			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				foundeom = 1;
			}
			do_wakeup_routine = 1;
			tp1->sent = SCTP_FORWARD_TSN_SKIP;
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/*
			 * on to the sent queue so we can wait for it to be
			 * passed by.
			 */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.send_queue_cnt--;
			stcb->asoc.sent_queue_cnt++;
		}
	}
	if (foundeom == 0) {
		/*
		 * Still no eom found. That means there is stuff left on the
		 * stream out queue.. yuck.
		 */
		/* pass 3: discard the rest from the stream-out queue */
		SCTP_TCB_SEND_LOCK(stcb);
		strq = &stcb->asoc.strmout[sid];
		sp = TAILQ_FIRST(&strq->outqueue);
		if (sp != NULL) {
			sp->discard_rest = 1;
			/*
			 * We may need to put a chunk on the queue that
			 * holds the TSN that would have been sent with the
			 * LAST bit.
			 */
			if (chk == NULL) {
				/* Yep, we have to */
				sctp_alloc_a_chunk(stcb, chk);
				if (chk == NULL) {
					/*
					 * we are hosed. All we can do is
					 * nothing.. which will cause an
					 * abort if the peer is paying
					 * attention.
					 */
					goto oh_well;
				}
				memset(chk, 0, sizeof(*chk));
				chk->rec.data.rcv_flags = 0;
				chk->sent = SCTP_FORWARD_TSN_SKIP;
				chk->asoc = &stcb->asoc;
				/* pick the MID the LAST fragment would have used */
				if (stcb->asoc.idata_supported == 0) {
					if (sp->sinfo_flags & SCTP_UNORDERED) {
						chk->rec.data.mid = 0;
					} else {
						chk->rec.data.mid = strq->next_mid_ordered;
					}
				} else {
					if (sp->sinfo_flags & SCTP_UNORDERED) {
						chk->rec.data.mid = strq->next_mid_unordered;
					} else {
						chk->rec.data.mid = strq->next_mid_ordered;
					}
				}
				chk->rec.data.sid = sp->sid;
				chk->rec.data.ppid = sp->ppid;
				chk->rec.data.context = sp->context;
				chk->flags = sp->act_flags;
				chk->whoTo = NULL;
				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
				strq->chunks_on_queues++;
				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
				stcb->asoc.sent_queue_cnt++;
				stcb->asoc.pr_sctp_cnt++;
			}
			/* mark the placeholder as the message's last fragment */
			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
			if (sp->sinfo_flags & SCTP_UNORDERED) {
				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
			}
			/* consume the MID we just used */
			if (stcb->asoc.idata_supported == 0) {
				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
					strq->next_mid_ordered++;
				}
			} else {
				if (sp->sinfo_flags & SCTP_UNORDERED) {
					strq->next_mid_unordered++;
				} else {
					strq->next_mid_ordered++;
				}
			}
	oh_well:
			if (sp->data) {
				/*
				 * Pull any data to free up the SB and allow
				 * sender to "add more" while we will throw
				 * away :-)
				 */
				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
				ret_sz += sp->length;
				do_wakeup_routine = 1;
				sp->some_taken = 1;
				sctp_m_freem(sp->data);
				sp->data = NULL;
				sp->tail_mbuf = NULL;
				sp->length = 0;
			}
		}
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
	if (do_wakeup_routine) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* drop TCB lock, take socket lock; refcnt pins the assoc */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (ret_sz);
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	return (ret_sz);
}
5332 
5333 /*
5334  * checks to see if the given address, sa, is one that is currently known by
5335  * the kernel note: can't distinguish the same address on multiple interfaces
5336  * and doesn't handle multiple addresses with different zone/scope id's note:
5337  * ifa_ifwithaddr() compares the entire sockaddr struct
5338  */
5339 struct sctp_ifa *
5340 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
5341     int holds_lock)
5342 {
5343 	struct sctp_laddr *laddr;
5344 
5345 	if (holds_lock == 0) {
5346 		SCTP_INP_RLOCK(inp);
5347 	}
5348 
5349 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
5350 		if (laddr->ifa == NULL)
5351 			continue;
5352 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
5353 			continue;
5354 #ifdef INET
5355 		if (addr->sa_family == AF_INET) {
5356 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5357 			    laddr->ifa->address.sin.sin_addr.s_addr) {
5358 				/* found him. */
5359 				if (holds_lock == 0) {
5360 					SCTP_INP_RUNLOCK(inp);
5361 				}
5362 				return (laddr->ifa);
5363 				break;
5364 			}
5365 		}
5366 #endif
5367 #ifdef INET6
5368 		if (addr->sa_family == AF_INET6) {
5369 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5370 			    &laddr->ifa->address.sin6)) {
5371 				/* found him. */
5372 				if (holds_lock == 0) {
5373 					SCTP_INP_RUNLOCK(inp);
5374 				}
5375 				return (laddr->ifa);
5376 				break;
5377 			}
5378 		}
5379 #endif
5380 	}
5381 	if (holds_lock == 0) {
5382 		SCTP_INP_RUNLOCK(inp);
5383 	}
5384 	return (NULL);
5385 }
5386 
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	/*
	 * Compute a hash value from an IPv4 or IPv6 sockaddr; unsupported
	 * families hash to 0.
	 */
	uint32_t hash;

	switch (addr->sa_family) {
#ifdef INET
	case AF_INET: {
		struct sockaddr_in *sin = (struct sockaddr_in *)addr;

		hash = sin->sin_addr.s_addr;
		/* fold the upper halfword into the lower */
		return (hash ^ (hash >> 16));
	}
#endif
#ifdef INET6
	case AF_INET6: {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;

		/* sum the four 32-bit words of the address */
		hash = sin6->sin6_addr.s6_addr32[0] +
		    sin6->sin6_addr.s6_addr32[1] +
		    sin6->sin6_addr.s6_addr32[2] +
		    sin6->sin6_addr.s6_addr32[3];
		return (hash ^ (hash >> 16));
	}
#endif
	default:
		return (0);
	}
}
5420 
5421 struct sctp_ifa *
5422 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5423 {
5424 	struct sctp_ifa *sctp_ifap;
5425 	struct sctp_vrf *vrf;
5426 	struct sctp_ifalist *hash_head;
5427 	uint32_t hash_of_addr;
5428 
5429 	if (holds_lock == 0)
5430 		SCTP_IPI_ADDR_RLOCK();
5431 
5432 	vrf = sctp_find_vrf(vrf_id);
5433 	if (vrf == NULL) {
5434 		if (holds_lock == 0)
5435 			SCTP_IPI_ADDR_RUNLOCK();
5436 		return (NULL);
5437 	}
5438 
5439 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5440 
5441 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5442 	if (hash_head == NULL) {
5443 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5444 		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5445 		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5446 		sctp_print_address(addr);
5447 		SCTP_PRINTF("No such bucket for address\n");
5448 		if (holds_lock == 0)
5449 			SCTP_IPI_ADDR_RUNLOCK();
5450 
5451 		return (NULL);
5452 	}
5453 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5454 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5455 			continue;
5456 #ifdef INET
5457 		if (addr->sa_family == AF_INET) {
5458 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5459 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5460 				/* found him. */
5461 				if (holds_lock == 0)
5462 					SCTP_IPI_ADDR_RUNLOCK();
5463 				return (sctp_ifap);
5464 				break;
5465 			}
5466 		}
5467 #endif
5468 #ifdef INET6
5469 		if (addr->sa_family == AF_INET6) {
5470 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5471 			    &sctp_ifap->address.sin6)) {
5472 				/* found him. */
5473 				if (holds_lock == 0)
5474 					SCTP_IPI_ADDR_RUNLOCK();
5475 				return (sctp_ifap);
5476 				break;
5477 			}
5478 		}
5479 #endif
5480 	}
5481 	if (holds_lock == 0)
5482 		SCTP_IPI_ADDR_RUNLOCK();
5483 	return (NULL);
5484 }
5485 
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	/*
	 * If the receive window has grown by at least rwnd_req since the
	 * last report, send a window-update SACK.  *freed_so_far is folded
	 * into the per-assoc counter and reset; hold_rlock tells us the
	 * caller holds the INP read lock, which must be dropped around the
	 * SACK send and re-taken before returning.
	 */
	struct epoch_tracker et;
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* pin the assoc while we may drop locks below */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED | SCTP_STATE_SHUTDOWN_RECEIVED))) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* window grew enough: worth sending an update SACK */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		NET_EPOCH_ENTER(et);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		NET_EPOCH_EXIT(et);
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* re-take the read lock if we dropped it for the caller */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}

	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5571 
5572 int
5573 sctp_sorecvmsg(struct socket *so,
5574     struct uio *uio,
5575     struct mbuf **mp,
5576     struct sockaddr *from,
5577     int fromlen,
5578     int *msg_flags,
5579     struct sctp_sndrcvinfo *sinfo,
5580     int filling_sinfo)
5581 {
5582 	/*
5583 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5584 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5585 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5586 	 * On the way out we may send out any combination of:
5587 	 * MSG_NOTIFICATION MSG_EOR
5588 	 *
5589 	 */
5590 	struct sctp_inpcb *inp = NULL;
5591 	ssize_t my_len = 0;
5592 	ssize_t cp_len = 0;
5593 	int error = 0;
5594 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5595 	struct mbuf *m = NULL;
5596 	struct sctp_tcb *stcb = NULL;
5597 	int wakeup_read_socket = 0;
5598 	int freecnt_applied = 0;
5599 	int out_flags = 0, in_flags = 0;
5600 	int block_allowed = 1;
5601 	uint32_t freed_so_far = 0;
5602 	ssize_t copied_so_far = 0;
5603 	int in_eeor_mode = 0;
5604 	int no_rcv_needed = 0;
5605 	uint32_t rwnd_req = 0;
5606 	int hold_sblock = 0;
5607 	int hold_rlock = 0;
5608 	ssize_t slen = 0;
5609 	uint32_t held_length = 0;
5610 	int sockbuf_lock = 0;
5611 
5612 	if (uio == NULL) {
5613 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5614 		return (EINVAL);
5615 	}
5616 
5617 	if (msg_flags) {
5618 		in_flags = *msg_flags;
5619 		if (in_flags & MSG_PEEK)
5620 			SCTP_STAT_INCR(sctps_read_peeks);
5621 	} else {
5622 		in_flags = 0;
5623 	}
5624 	slen = uio->uio_resid;
5625 
5626 	/* Pull in and set up our int flags */
5627 	if (in_flags & MSG_OOB) {
5628 		/* Out of band's NOT supported */
5629 		return (EOPNOTSUPP);
5630 	}
5631 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5632 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5633 		return (EINVAL);
5634 	}
5635 	if ((in_flags & (MSG_DONTWAIT
5636 	    | MSG_NBIO
5637 	    )) ||
5638 	    SCTP_SO_IS_NBIO(so)) {
5639 		block_allowed = 0;
5640 	}
5641 	/* setup the endpoint */
5642 	inp = (struct sctp_inpcb *)so->so_pcb;
5643 	if (inp == NULL) {
5644 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5645 		return (EFAULT);
5646 	}
5647 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5648 	/* Must be at least a MTU's worth */
5649 	if (rwnd_req < SCTP_MIN_RWND)
5650 		rwnd_req = SCTP_MIN_RWND;
5651 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5652 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5653 		sctp_misc_ints(SCTP_SORECV_ENTER,
5654 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5655 	}
5656 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5657 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5658 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5659 	}
5660 
5661 
5662 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5663 	if (error) {
5664 		goto release_unlocked;
5665 	}
5666 	sockbuf_lock = 1;
5667 restart:
5668 
5669 
5670 restart_nosblocks:
5671 	if (hold_sblock == 0) {
5672 		SOCKBUF_LOCK(&so->so_rcv);
5673 		hold_sblock = 1;
5674 	}
5675 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5676 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5677 		goto out;
5678 	}
5679 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5680 		if (so->so_error) {
5681 			error = so->so_error;
5682 			if ((in_flags & MSG_PEEK) == 0)
5683 				so->so_error = 0;
5684 			goto out;
5685 		} else {
5686 			if (so->so_rcv.sb_cc == 0) {
5687 				/* indicate EOF */
5688 				error = 0;
5689 				goto out;
5690 			}
5691 		}
5692 	}
5693 	if (so->so_rcv.sb_cc <= held_length) {
5694 		if (so->so_error) {
5695 			error = so->so_error;
5696 			if ((in_flags & MSG_PEEK) == 0) {
5697 				so->so_error = 0;
5698 			}
5699 			goto out;
5700 		}
5701 		if ((so->so_rcv.sb_cc == 0) &&
5702 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5703 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5704 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5705 				/*
5706 				 * For active open side clear flags for
5707 				 * re-use passive open is blocked by
5708 				 * connect.
5709 				 */
5710 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5711 					/*
5712 					 * You were aborted, passive side
5713 					 * always hits here
5714 					 */
5715 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5716 					error = ECONNRESET;
5717 				}
5718 				so->so_state &= ~(SS_ISCONNECTING |
5719 				    SS_ISDISCONNECTING |
5720 				    SS_ISCONFIRMING |
5721 				    SS_ISCONNECTED);
5722 				if (error == 0) {
5723 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5724 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5725 						error = ENOTCONN;
5726 					}
5727 				}
5728 				goto out;
5729 			}
5730 		}
5731 		if (block_allowed) {
5732 			error = sbwait(&so->so_rcv);
5733 			if (error) {
5734 				goto out;
5735 			}
5736 			held_length = 0;
5737 			goto restart_nosblocks;
5738 		} else {
5739 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5740 			error = EWOULDBLOCK;
5741 			goto out;
5742 		}
5743 	}
5744 	if (hold_sblock == 1) {
5745 		SOCKBUF_UNLOCK(&so->so_rcv);
5746 		hold_sblock = 0;
5747 	}
5748 	/* we possibly have data we can read */
5749 	/* sa_ignore FREED_MEMORY */
5750 	control = TAILQ_FIRST(&inp->read_queue);
5751 	if (control == NULL) {
5752 		/*
5753 		 * This could be happening since the appender did the
5754 		 * increment but as not yet did the tailq insert onto the
5755 		 * read_queue
5756 		 */
5757 		if (hold_rlock == 0) {
5758 			SCTP_INP_READ_LOCK(inp);
5759 		}
5760 		control = TAILQ_FIRST(&inp->read_queue);
5761 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5762 #ifdef INVARIANTS
5763 			panic("Huh, its non zero and nothing on control?");
5764 #endif
5765 			so->so_rcv.sb_cc = 0;
5766 		}
5767 		SCTP_INP_READ_UNLOCK(inp);
5768 		hold_rlock = 0;
5769 		goto restart;
5770 	}
5771 
5772 	if ((control->length == 0) &&
5773 	    (control->do_not_ref_stcb)) {
5774 		/*
5775 		 * Clean up code for freeing assoc that left behind a
5776 		 * pdapi.. maybe a peer in EEOR that just closed after
5777 		 * sending and never indicated a EOR.
5778 		 */
5779 		if (hold_rlock == 0) {
5780 			hold_rlock = 1;
5781 			SCTP_INP_READ_LOCK(inp);
5782 		}
5783 		control->held_length = 0;
5784 		if (control->data) {
5785 			/* Hmm there is data here .. fix */
5786 			struct mbuf *m_tmp;
5787 			int cnt = 0;
5788 
5789 			m_tmp = control->data;
5790 			while (m_tmp) {
5791 				cnt += SCTP_BUF_LEN(m_tmp);
5792 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5793 					control->tail_mbuf = m_tmp;
5794 					control->end_added = 1;
5795 				}
5796 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5797 			}
5798 			control->length = cnt;
5799 		} else {
5800 			/* remove it */
5801 			TAILQ_REMOVE(&inp->read_queue, control, next);
5802 			/* Add back any hiddend data */
5803 			sctp_free_remote_addr(control->whoFrom);
5804 			sctp_free_a_readq(stcb, control);
5805 		}
5806 		if (hold_rlock) {
5807 			hold_rlock = 0;
5808 			SCTP_INP_READ_UNLOCK(inp);
5809 		}
5810 		goto restart;
5811 	}
5812 	if ((control->length == 0) &&
5813 	    (control->end_added == 1)) {
5814 		/*
5815 		 * Do we also need to check for (control->pdapi_aborted ==
5816 		 * 1)?
5817 		 */
5818 		if (hold_rlock == 0) {
5819 			hold_rlock = 1;
5820 			SCTP_INP_READ_LOCK(inp);
5821 		}
5822 		TAILQ_REMOVE(&inp->read_queue, control, next);
5823 		if (control->data) {
5824 #ifdef INVARIANTS
5825 			panic("control->data not null but control->length == 0");
5826 #else
5827 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5828 			sctp_m_freem(control->data);
5829 			control->data = NULL;
5830 #endif
5831 		}
5832 		if (control->aux_data) {
5833 			sctp_m_free(control->aux_data);
5834 			control->aux_data = NULL;
5835 		}
5836 #ifdef INVARIANTS
5837 		if (control->on_strm_q) {
5838 			panic("About to free ctl:%p so:%p and its in %d",
5839 			    control, so, control->on_strm_q);
5840 		}
5841 #endif
5842 		sctp_free_remote_addr(control->whoFrom);
5843 		sctp_free_a_readq(stcb, control);
5844 		if (hold_rlock) {
5845 			hold_rlock = 0;
5846 			SCTP_INP_READ_UNLOCK(inp);
5847 		}
5848 		goto restart;
5849 	}
5850 	if (control->length == 0) {
5851 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5852 		    (filling_sinfo)) {
5853 			/* find a more suitable one then this */
5854 			ctl = TAILQ_NEXT(control, next);
5855 			while (ctl) {
5856 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5857 				    (ctl->some_taken ||
5858 				    (ctl->spec_flags & M_NOTIFICATION) ||
5859 				    ((ctl->do_not_ref_stcb == 0) &&
5860 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5861 				    ) {
5862 					/*-
5863 					 * If we have a different TCB next, and there is data
5864 					 * present. If we have already taken some (pdapi), OR we can
5865 					 * ref the tcb and no delivery as started on this stream, we
5866 					 * take it. Note we allow a notification on a different
5867 					 * assoc to be delivered..
5868 					 */
5869 					control = ctl;
5870 					goto found_one;
5871 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5872 					    (ctl->length) &&
5873 					    ((ctl->some_taken) ||
5874 					    ((ctl->do_not_ref_stcb == 0) &&
5875 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5876 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5877 					/*-
5878 					 * If we have the same tcb, and there is data present, and we
5879 					 * have the strm interleave feature present. Then if we have
5880 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5881 					 * not started a delivery for this stream, we can take it.
5882 					 * Note we do NOT allow a notificaiton on the same assoc to
5883 					 * be delivered.
5884 					 */
5885 					control = ctl;
5886 					goto found_one;
5887 				}
5888 				ctl = TAILQ_NEXT(ctl, next);
5889 			}
5890 		}
5891 		/*
5892 		 * if we reach here, not suitable replacement is available
5893 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5894 		 * into the our held count, and its time to sleep again.
5895 		 */
5896 		held_length = so->so_rcv.sb_cc;
5897 		control->held_length = so->so_rcv.sb_cc;
5898 		goto restart;
5899 	}
5900 	/* Clear the held length since there is something to read */
5901 	control->held_length = 0;
5902 found_one:
5903 	/*
5904 	 * If we reach here, control has a some data for us to read off.
5905 	 * Note that stcb COULD be NULL.
5906 	 */
5907 	if (hold_rlock == 0) {
5908 		hold_rlock = 1;
5909 		SCTP_INP_READ_LOCK(inp);
5910 	}
5911 	control->some_taken++;
5912 	stcb = control->stcb;
5913 	if (stcb) {
5914 		if ((control->do_not_ref_stcb == 0) &&
5915 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5916 			if (freecnt_applied == 0)
5917 				stcb = NULL;
5918 		} else if (control->do_not_ref_stcb == 0) {
5919 			/* you can't free it on me please */
5920 			/*
5921 			 * The lock on the socket buffer protects us so the
5922 			 * free code will stop. But since we used the
5923 			 * socketbuf lock and the sender uses the tcb_lock
5924 			 * to increment, we need to use the atomic add to
5925 			 * the refcnt
5926 			 */
5927 			if (freecnt_applied) {
5928 #ifdef INVARIANTS
5929 				panic("refcnt already incremented");
5930 #else
5931 				SCTP_PRINTF("refcnt already incremented?\n");
5932 #endif
5933 			} else {
5934 				atomic_add_int(&stcb->asoc.refcnt, 1);
5935 				freecnt_applied = 1;
5936 			}
5937 			/*
5938 			 * Setup to remember how much we have not yet told
5939 			 * the peer our rwnd has opened up. Note we grab the
5940 			 * value from the tcb from last time. Note too that
5941 			 * sack sending clears this when a sack is sent,
5942 			 * which is fine. Once we hit the rwnd_req, we then
5943 			 * will go to the sctp_user_rcvd() that will not
5944 			 * lock until it KNOWs it MUST send a WUP-SACK.
5945 			 */
5946 			freed_so_far = (uint32_t)stcb->freed_by_sorcv_sincelast;
5947 			stcb->freed_by_sorcv_sincelast = 0;
5948 		}
5949 	}
5950 	if (stcb &&
5951 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5952 	    control->do_not_ref_stcb == 0) {
5953 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5954 	}
5955 
5956 	/* First lets get off the sinfo and sockaddr info */
5957 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5958 		sinfo->sinfo_stream = control->sinfo_stream;
5959 		sinfo->sinfo_ssn = (uint16_t)control->mid;
5960 		sinfo->sinfo_flags = control->sinfo_flags;
5961 		sinfo->sinfo_ppid = control->sinfo_ppid;
5962 		sinfo->sinfo_context = control->sinfo_context;
5963 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5964 		sinfo->sinfo_tsn = control->sinfo_tsn;
5965 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5966 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5967 		nxt = TAILQ_NEXT(control, next);
5968 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5969 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5970 			struct sctp_extrcvinfo *s_extra;
5971 
5972 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5973 			if ((nxt) &&
5974 			    (nxt->length)) {
5975 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5976 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5977 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5978 				}
5979 				if (nxt->spec_flags & M_NOTIFICATION) {
5980 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5981 				}
5982 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5983 				s_extra->serinfo_next_length = nxt->length;
5984 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5985 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5986 				if (nxt->tail_mbuf != NULL) {
5987 					if (nxt->end_added) {
5988 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5989 					}
5990 				}
5991 			} else {
5992 				/*
5993 				 * we explicitly 0 this, since the memcpy
5994 				 * got some other things beyond the older
5995 				 * sinfo_ that is on the control's structure
5996 				 * :-D
5997 				 */
5998 				nxt = NULL;
5999 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6000 				s_extra->serinfo_next_aid = 0;
6001 				s_extra->serinfo_next_length = 0;
6002 				s_extra->serinfo_next_ppid = 0;
6003 				s_extra->serinfo_next_stream = 0;
6004 			}
6005 		}
6006 		/*
6007 		 * update off the real current cum-ack, if we have an stcb.
6008 		 */
6009 		if ((control->do_not_ref_stcb == 0) && stcb)
6010 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
6011 		/*
6012 		 * mask off the high bits, we keep the actual chunk bits in
6013 		 * there.
6014 		 */
6015 		sinfo->sinfo_flags &= 0x00ff;
6016 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
6017 			sinfo->sinfo_flags |= SCTP_UNORDERED;
6018 		}
6019 	}
6020 #ifdef SCTP_ASOCLOG_OF_TSNS
6021 	{
6022 		int index, newindex;
6023 		struct sctp_pcbtsn_rlog *entry;
6024 
6025 		do {
6026 			index = inp->readlog_index;
6027 			newindex = index + 1;
6028 			if (newindex >= SCTP_READ_LOG_SIZE) {
6029 				newindex = 0;
6030 			}
6031 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
6032 		entry = &inp->readlog[index];
6033 		entry->vtag = control->sinfo_assoc_id;
6034 		entry->strm = control->sinfo_stream;
6035 		entry->seq = (uint16_t)control->mid;
6036 		entry->sz = control->length;
6037 		entry->flgs = control->sinfo_flags;
6038 	}
6039 #endif
6040 	if ((fromlen > 0) && (from != NULL)) {
6041 		union sctp_sockstore store;
6042 		size_t len;
6043 
6044 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
6045 #ifdef INET6
6046 		case AF_INET6:
6047 			len = sizeof(struct sockaddr_in6);
6048 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
6049 			store.sin6.sin6_port = control->port_from;
6050 			break;
6051 #endif
6052 #ifdef INET
6053 		case AF_INET:
6054 #ifdef INET6
6055 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
6056 				len = sizeof(struct sockaddr_in6);
6057 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
6058 				    &store.sin6);
6059 				store.sin6.sin6_port = control->port_from;
6060 			} else {
6061 				len = sizeof(struct sockaddr_in);
6062 				store.sin = control->whoFrom->ro._l_addr.sin;
6063 				store.sin.sin_port = control->port_from;
6064 			}
6065 #else
6066 			len = sizeof(struct sockaddr_in);
6067 			store.sin = control->whoFrom->ro._l_addr.sin;
6068 			store.sin.sin_port = control->port_from;
6069 #endif
6070 			break;
6071 #endif
6072 		default:
6073 			len = 0;
6074 			break;
6075 		}
6076 		memcpy(from, &store, min((size_t)fromlen, len));
6077 #ifdef INET6
6078 		{
6079 			struct sockaddr_in6 lsa6, *from6;
6080 
6081 			from6 = (struct sockaddr_in6 *)from;
6082 			sctp_recover_scope_mac(from6, (&lsa6));
6083 		}
6084 #endif
6085 	}
6086 	if (hold_rlock) {
6087 		SCTP_INP_READ_UNLOCK(inp);
6088 		hold_rlock = 0;
6089 	}
6090 	if (hold_sblock) {
6091 		SOCKBUF_UNLOCK(&so->so_rcv);
6092 		hold_sblock = 0;
6093 	}
6094 	/* now copy out what data we can */
6095 	if (mp == NULL) {
6096 		/* copy out each mbuf in the chain up to length */
6097 get_more_data:
6098 		m = control->data;
6099 		while (m) {
6100 			/* Move out all we can */
6101 			cp_len = uio->uio_resid;
6102 			my_len = SCTP_BUF_LEN(m);
6103 			if (cp_len > my_len) {
6104 				/* not enough in this buf */
6105 				cp_len = my_len;
6106 			}
6107 			if (hold_rlock) {
6108 				SCTP_INP_READ_UNLOCK(inp);
6109 				hold_rlock = 0;
6110 			}
6111 			if (cp_len > 0)
6112 				error = uiomove(mtod(m, char *), (int)cp_len, uio);
6113 			/* re-read */
6114 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
6115 				goto release;
6116 			}
6117 
6118 			if ((control->do_not_ref_stcb == 0) && stcb &&
6119 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
6120 				no_rcv_needed = 1;
6121 			}
6122 			if (error) {
6123 				/* error we are out of here */
6124 				goto release;
6125 			}
6126 			SCTP_INP_READ_LOCK(inp);
6127 			hold_rlock = 1;
6128 			if (cp_len == SCTP_BUF_LEN(m)) {
6129 				if ((SCTP_BUF_NEXT(m) == NULL) &&
6130 				    (control->end_added)) {
6131 					out_flags |= MSG_EOR;
6132 					if ((control->do_not_ref_stcb == 0) &&
6133 					    (control->stcb != NULL) &&
6134 					    ((control->spec_flags & M_NOTIFICATION) == 0))
6135 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6136 				}
6137 				if (control->spec_flags & M_NOTIFICATION) {
6138 					out_flags |= MSG_NOTIFICATION;
6139 				}
6140 				/* we ate up the mbuf */
6141 				if (in_flags & MSG_PEEK) {
6142 					/* just looking */
6143 					m = SCTP_BUF_NEXT(m);
6144 					copied_so_far += cp_len;
6145 				} else {
6146 					/* dispose of the mbuf */
6147 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6148 						sctp_sblog(&so->so_rcv,
6149 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6150 					}
6151 					sctp_sbfree(control, stcb, &so->so_rcv, m);
6152 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6153 						sctp_sblog(&so->so_rcv,
6154 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6155 					}
6156 					copied_so_far += cp_len;
6157 					freed_so_far += (uint32_t)cp_len;
6158 					freed_so_far += MSIZE;
6159 					atomic_subtract_int(&control->length, cp_len);
6160 					control->data = sctp_m_free(m);
6161 					m = control->data;
6162 					/*
6163 					 * been through it all, must hold sb
6164 					 * lock ok to null tail
6165 					 */
6166 					if (control->data == NULL) {
6167 #ifdef INVARIANTS
6168 						if ((control->end_added == 0) ||
6169 						    (TAILQ_NEXT(control, next) == NULL)) {
6170 							/*
6171 							 * If the end is not
6172 							 * added, OR the
6173 							 * next is NOT null
6174 							 * we MUST have the
6175 							 * lock.
6176 							 */
6177 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
6178 								panic("Hmm we don't own the lock?");
6179 							}
6180 						}
6181 #endif
6182 						control->tail_mbuf = NULL;
6183 #ifdef INVARIANTS
6184 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
6185 							panic("end_added, nothing left and no MSG_EOR");
6186 						}
6187 #endif
6188 					}
6189 				}
6190 			} else {
6191 				/* Do we need to trim the mbuf? */
6192 				if (control->spec_flags & M_NOTIFICATION) {
6193 					out_flags |= MSG_NOTIFICATION;
6194 				}
6195 				if ((in_flags & MSG_PEEK) == 0) {
6196 					SCTP_BUF_RESV_UF(m, cp_len);
6197 					SCTP_BUF_LEN(m) -= (int)cp_len;
6198 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6199 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, (int)cp_len);
6200 					}
6201 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
6202 					if ((control->do_not_ref_stcb == 0) &&
6203 					    stcb) {
6204 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
6205 					}
6206 					copied_so_far += cp_len;
6207 					freed_so_far += (uint32_t)cp_len;
6208 					freed_so_far += MSIZE;
6209 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6210 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
6211 						    SCTP_LOG_SBRESULT, 0);
6212 					}
6213 					atomic_subtract_int(&control->length, cp_len);
6214 				} else {
6215 					copied_so_far += cp_len;
6216 				}
6217 			}
6218 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
6219 				break;
6220 			}
6221 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6222 			    (control->do_not_ref_stcb == 0) &&
6223 			    (freed_so_far >= rwnd_req)) {
6224 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6225 			}
6226 		}		/* end while(m) */
6227 		/*
6228 		 * At this point we have looked at it all and we either have
6229 		 * a MSG_EOR/or read all the user wants... <OR>
6230 		 * control->length == 0.
6231 		 */
6232 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
6233 			/* we are done with this control */
6234 			if (control->length == 0) {
6235 				if (control->data) {
6236 #ifdef INVARIANTS
6237 					panic("control->data not null at read eor?");
6238 #else
6239 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
6240 					sctp_m_freem(control->data);
6241 					control->data = NULL;
6242 #endif
6243 				}
6244 		done_with_control:
6245 				if (hold_rlock == 0) {
6246 					SCTP_INP_READ_LOCK(inp);
6247 					hold_rlock = 1;
6248 				}
6249 				TAILQ_REMOVE(&inp->read_queue, control, next);
6250 				/* Add back any hiddend data */
6251 				if (control->held_length) {
6252 					held_length = 0;
6253 					control->held_length = 0;
6254 					wakeup_read_socket = 1;
6255 				}
6256 				if (control->aux_data) {
6257 					sctp_m_free(control->aux_data);
6258 					control->aux_data = NULL;
6259 				}
6260 				no_rcv_needed = control->do_not_ref_stcb;
6261 				sctp_free_remote_addr(control->whoFrom);
6262 				control->data = NULL;
6263 #ifdef INVARIANTS
6264 				if (control->on_strm_q) {
6265 					panic("About to free ctl:%p so:%p and its in %d",
6266 					    control, so, control->on_strm_q);
6267 				}
6268 #endif
6269 				sctp_free_a_readq(stcb, control);
6270 				control = NULL;
6271 				if ((freed_so_far >= rwnd_req) &&
6272 				    (no_rcv_needed == 0))
6273 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6274 
6275 			} else {
6276 				/*
6277 				 * The user did not read all of this
6278 				 * message, turn off the returned MSG_EOR
6279 				 * since we are leaving more behind on the
6280 				 * control to read.
6281 				 */
6282 #ifdef INVARIANTS
6283 				if (control->end_added &&
6284 				    (control->data == NULL) &&
6285 				    (control->tail_mbuf == NULL)) {
6286 					panic("Gak, control->length is corrupt?");
6287 				}
6288 #endif
6289 				no_rcv_needed = control->do_not_ref_stcb;
6290 				out_flags &= ~MSG_EOR;
6291 			}
6292 		}
6293 		if (out_flags & MSG_EOR) {
6294 			goto release;
6295 		}
6296 		if ((uio->uio_resid == 0) ||
6297 		    ((in_eeor_mode) &&
6298 		    (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))) {
6299 			goto release;
6300 		}
6301 		/*
6302 		 * If I hit here the receiver wants more and this message is
6303 		 * NOT done (pd-api). So two questions. Can we block? if not
6304 		 * we are done. Did the user NOT set MSG_WAITALL?
6305 		 */
6306 		if (block_allowed == 0) {
6307 			goto release;
6308 		}
6309 		/*
6310 		 * We need to wait for more data a few things: - We don't
6311 		 * sbunlock() so we don't get someone else reading. - We
6312 		 * must be sure to account for the case where what is added
6313 		 * is NOT to our control when we wakeup.
6314 		 */
6315 
6316 		/*
6317 		 * Do we need to tell the transport a rwnd update might be
6318 		 * needed before we go to sleep?
6319 		 */
6320 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6321 		    ((freed_so_far >= rwnd_req) &&
6322 		    (control->do_not_ref_stcb == 0) &&
6323 		    (no_rcv_needed == 0))) {
6324 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6325 		}
6326 wait_some_more:
6327 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
6328 			goto release;
6329 		}
6330 
6331 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
6332 			goto release;
6333 
6334 		if (hold_rlock == 1) {
6335 			SCTP_INP_READ_UNLOCK(inp);
6336 			hold_rlock = 0;
6337 		}
6338 		if (hold_sblock == 0) {
6339 			SOCKBUF_LOCK(&so->so_rcv);
6340 			hold_sblock = 1;
6341 		}
6342 		if ((copied_so_far) && (control->length == 0) &&
6343 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
6344 			goto release;
6345 		}
6346 		if (so->so_rcv.sb_cc <= control->held_length) {
6347 			error = sbwait(&so->so_rcv);
6348 			if (error) {
6349 				goto release;
6350 			}
6351 			control->held_length = 0;
6352 		}
6353 		if (hold_sblock) {
6354 			SOCKBUF_UNLOCK(&so->so_rcv);
6355 			hold_sblock = 0;
6356 		}
6357 		if (control->length == 0) {
6358 			/* still nothing here */
6359 			if (control->end_added == 1) {
6360 				/* he aborted, or is done i.e.did a shutdown */
6361 				out_flags |= MSG_EOR;
6362 				if (control->pdapi_aborted) {
6363 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6364 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6365 
6366 					out_flags |= MSG_TRUNC;
6367 				} else {
6368 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6369 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6370 				}
6371 				goto done_with_control;
6372 			}
6373 			if (so->so_rcv.sb_cc > held_length) {
6374 				control->held_length = so->so_rcv.sb_cc;
6375 				held_length = 0;
6376 			}
6377 			goto wait_some_more;
6378 		} else if (control->data == NULL) {
6379 			/*
6380 			 * we must re-sync since data is probably being
6381 			 * added
6382 			 */
6383 			SCTP_INP_READ_LOCK(inp);
6384 			if ((control->length > 0) && (control->data == NULL)) {
6385 				/*
6386 				 * big trouble.. we have the lock and its
6387 				 * corrupt?
6388 				 */
6389 #ifdef INVARIANTS
6390 				panic("Impossible data==NULL length !=0");
6391 #endif
6392 				out_flags |= MSG_EOR;
6393 				out_flags |= MSG_TRUNC;
6394 				control->length = 0;
6395 				SCTP_INP_READ_UNLOCK(inp);
6396 				goto done_with_control;
6397 			}
6398 			SCTP_INP_READ_UNLOCK(inp);
6399 			/* We will fall around to get more data */
6400 		}
6401 		goto get_more_data;
6402 	} else {
6403 		/*-
6404 		 * Give caller back the mbuf chain,
6405 		 * store in uio_resid the length
6406 		 */
6407 		wakeup_read_socket = 0;
6408 		if ((control->end_added == 0) ||
6409 		    (TAILQ_NEXT(control, next) == NULL)) {
6410 			/* Need to get rlock */
6411 			if (hold_rlock == 0) {
6412 				SCTP_INP_READ_LOCK(inp);
6413 				hold_rlock = 1;
6414 			}
6415 		}
6416 		if (control->end_added) {
6417 			out_flags |= MSG_EOR;
6418 			if ((control->do_not_ref_stcb == 0) &&
6419 			    (control->stcb != NULL) &&
6420 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6421 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6422 		}
6423 		if (control->spec_flags & M_NOTIFICATION) {
6424 			out_flags |= MSG_NOTIFICATION;
6425 		}
6426 		uio->uio_resid = control->length;
6427 		*mp = control->data;
6428 		m = control->data;
6429 		while (m) {
6430 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6431 				sctp_sblog(&so->so_rcv,
6432 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6433 			}
6434 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6435 			freed_so_far += (uint32_t)SCTP_BUF_LEN(m);
6436 			freed_so_far += MSIZE;
6437 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6438 				sctp_sblog(&so->so_rcv,
6439 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6440 			}
6441 			m = SCTP_BUF_NEXT(m);
6442 		}
6443 		control->data = control->tail_mbuf = NULL;
6444 		control->length = 0;
6445 		if (out_flags & MSG_EOR) {
6446 			/* Done with this control */
6447 			goto done_with_control;
6448 		}
6449 	}
6450 release:
6451 	if (hold_rlock == 1) {
6452 		SCTP_INP_READ_UNLOCK(inp);
6453 		hold_rlock = 0;
6454 	}
6455 	if (hold_sblock == 1) {
6456 		SOCKBUF_UNLOCK(&so->so_rcv);
6457 		hold_sblock = 0;
6458 	}
6459 
6460 	sbunlock(&so->so_rcv);
6461 	sockbuf_lock = 0;
6462 
6463 release_unlocked:
6464 	if (hold_sblock) {
6465 		SOCKBUF_UNLOCK(&so->so_rcv);
6466 		hold_sblock = 0;
6467 	}
6468 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6469 		if ((freed_so_far >= rwnd_req) &&
6470 		    (control && (control->do_not_ref_stcb == 0)) &&
6471 		    (no_rcv_needed == 0))
6472 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6473 	}
6474 out:
6475 	if (msg_flags) {
6476 		*msg_flags = out_flags;
6477 	}
6478 	if (((out_flags & MSG_EOR) == 0) &&
6479 	    ((in_flags & MSG_PEEK) == 0) &&
6480 	    (sinfo) &&
6481 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6482 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6483 		struct sctp_extrcvinfo *s_extra;
6484 
6485 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6486 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6487 	}
6488 	if (hold_rlock == 1) {
6489 		SCTP_INP_READ_UNLOCK(inp);
6490 	}
6491 	if (hold_sblock) {
6492 		SOCKBUF_UNLOCK(&so->so_rcv);
6493 	}
6494 	if (sockbuf_lock) {
6495 		sbunlock(&so->so_rcv);
6496 	}
6497 
6498 	if (freecnt_applied) {
6499 		/*
6500 		 * The lock on the socket buffer protects us so the free
6501 		 * code will stop. But since we used the socketbuf lock and
6502 		 * the sender uses the tcb_lock to increment, we need to use
6503 		 * the atomic add to the refcnt.
6504 		 */
6505 		if (stcb == NULL) {
6506 #ifdef INVARIANTS
6507 			panic("stcb for refcnt has gone NULL?");
6508 			goto stage_left;
6509 #else
6510 			goto stage_left;
6511 #endif
6512 		}
6513 		/* Save the value back for next time */
6514 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6515 		atomic_add_int(&stcb->asoc.refcnt, -1);
6516 	}
6517 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6518 		if (stcb) {
6519 			sctp_misc_ints(SCTP_SORECV_DONE,
6520 			    freed_so_far,
6521 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6522 			    stcb->asoc.my_rwnd,
6523 			    so->so_rcv.sb_cc);
6524 		} else {
6525 			sctp_misc_ints(SCTP_SORECV_DONE,
6526 			    freed_so_far,
6527 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6528 			    0,
6529 			    so->so_rcv.sb_cc);
6530 		}
6531 	}
6532 stage_left:
6533 	if (wakeup_read_socket) {
6534 		sctp_sorwakeup(inp, so);
6535 	}
6536 	return (error);
6537 }
6538 
6539 
6540 #ifdef SCTP_MBUF_LOGGING
6541 struct mbuf *
6542 sctp_m_free(struct mbuf *m)
6543 {
6544 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6545 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6546 	}
6547 	return (m_free(m));
6548 }
6549 
6550 void
6551 sctp_m_freem(struct mbuf *mb)
6552 {
6553 	while (mb != NULL)
6554 		mb = sctp_m_free(mb);
6555 }
6556 
6557 #endif
6558 
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 *
	 * The actual per-association work is deferred: we queue a work
	 * item on the global address work queue and kick the ADDR_WQ
	 * timer, and the iterator performs the set-primary requests.
	 *
	 * Returns 0 on success, or EADDRNOTAVAIL / ENOMEM on failure.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	/* The address must be a known local address in this VRF. */
	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and init the wi structure */
	SCTP_INCR_LADDR_COUNT();
	memset(wi, 0, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* Hold a reference on the ifa for the work item; the iterator
	 * is expected to release it when the item is processed. */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	/* Start the timer while still holding the lock so the queued
	 * item cannot be consumed and the timer re-armed in between. */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	SCTP_WQ_ADDR_UNLOCK();
	return (0);
}
6605 
6606 
6607 int
6608 sctp_soreceive(struct socket *so,
6609     struct sockaddr **psa,
6610     struct uio *uio,
6611     struct mbuf **mp0,
6612     struct mbuf **controlp,
6613     int *flagsp)
6614 {
6615 	int error, fromlen;
6616 	uint8_t sockbuf[256];
6617 	struct sockaddr *from;
6618 	struct sctp_extrcvinfo sinfo;
6619 	int filling_sinfo = 1;
6620 	int flags;
6621 	struct sctp_inpcb *inp;
6622 
6623 	inp = (struct sctp_inpcb *)so->so_pcb;
6624 	/* pickup the assoc we are reading from */
6625 	if (inp == NULL) {
6626 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6627 		return (EINVAL);
6628 	}
6629 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6630 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6631 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6632 	    (controlp == NULL)) {
6633 		/* user does not want the sndrcv ctl */
6634 		filling_sinfo = 0;
6635 	}
6636 	if (psa) {
6637 		from = (struct sockaddr *)sockbuf;
6638 		fromlen = sizeof(sockbuf);
6639 		from->sa_len = 0;
6640 	} else {
6641 		from = NULL;
6642 		fromlen = 0;
6643 	}
6644 
6645 	if (filling_sinfo) {
6646 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6647 	}
6648 	if (flagsp != NULL) {
6649 		flags = *flagsp;
6650 	} else {
6651 		flags = 0;
6652 	}
6653 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags,
6654 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6655 	if (flagsp != NULL) {
6656 		*flagsp = flags;
6657 	}
6658 	if (controlp != NULL) {
6659 		/* copy back the sinfo in a CMSG format */
6660 		if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) {
6661 			*controlp = sctp_build_ctl_nchunk(inp,
6662 			    (struct sctp_sndrcvinfo *)&sinfo);
6663 		} else {
6664 			*controlp = NULL;
6665 		}
6666 	}
6667 	if (psa) {
6668 		/* copy back the address info */
6669 		if (from && from->sa_len) {
6670 			*psa = sodupsockaddr(from, M_NOWAIT);
6671 		} else {
6672 			*psa = NULL;
6673 		}
6674 	}
6675 	return (error);
6676 }
6677 
6678 
6679 
6680 
6681 
/*
 * sctp_connectx() helper: add "totaddr" packed sockaddrs starting at
 * "addr" as remote peers of the given association.  Returns the number
 * of addresses added.  On a bad address or allocation failure the
 * association is freed via sctp_free_assoc(), *error is set to
 * EINVAL/ENOBUFS and the walk stops.  NOTE(review): the default case
 * advances "sa" by the increment from the previous iteration (or 0);
 * the earlier validation pass (sctp_connectx_helper_find()) is
 * presumed to reject unknown families before we get here — confirm.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;
#ifdef INET
	struct sockaddr_in *sin;
#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;
#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* Reject wildcard, broadcast and multicast peers. */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* Reject unspecified and multicast peers. */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			break;
		}
		/* Step to the next packed sockaddr. */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6762 
/*
 * sctp_connectx() helper: walk the packed address list, validate each
 * entry (length, family, bounds against "limit", no v4-mapped v6) and
 * count the IPv4/IPv6 addresses into *num_v4/*num_v6.  Returns 0 on
 * success, EINVAL on a malformed list, or EALREADY if any address
 * already belongs to an association on this endpoint.
 */
int
sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
    unsigned int totaddr,
    unsigned int *num_v4, unsigned int *num_v6,
    unsigned int limit)
{
	struct sockaddr *sa;
	struct sctp_tcb *stcb;
	unsigned int incr, at, i;

	at = 0;
	sa = addr;
	*num_v6 = *num_v4 = 0;
	/* account and validate addresses */
	if (totaddr == 0) {
		return (EINVAL);
	}
	for (i = 0; i < totaddr; i++) {
		/* At least a generic sockaddr must fit before we look at it. */
		if (at + sizeof(struct sockaddr) > limit) {
			return (EINVAL);
		}
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = (unsigned int)sizeof(struct sockaddr_in);
			if (sa->sa_len != incr) {
				return (EINVAL);
			}
			(*num_v4) += 1;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			{
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)sa;
				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
					/* Must be non-mapped for connectx */
					return (EINVAL);
				}
				incr = (unsigned int)sizeof(struct sockaddr_in6);
				if (sa->sa_len != incr) {
					return (EINVAL);
				}
				(*num_v6) += 1;
				break;
			}
#endif
		default:
			return (EINVAL);
		}
		/* The full family-specific sockaddr must fit, too. */
		if ((at + incr) > limit) {
			return (EINVAL);
		}
		/*
		 * Hold a ref on the endpoint across the lookup so it cannot
		 * go away.  NOTE(review): when an assoc is found the ref is
		 * not dropped here — presumably consumed by the lookup;
		 * confirm against sctp_findassociation_ep_addr().
		 */
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
		if (stcb != NULL) {
			/* The lookup returns the assoc locked; release it. */
			SCTP_TCB_UNLOCK(stcb);
			return (EALREADY);
		} else {
			SCTP_INP_DECR_REF(inp);
		}
		at += incr;
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
	return (0);
}
6831 
6832 /*
6833  * sctp_bindx(ADD) for one address.
6834  * assumes all arguments are valid/checked by caller.
6835  */
/*
 * Validate the address against the endpoint's family/bind state, then
 * either perform the initial bind (if still unbound) or add the
 * address to the endpoint's address list.  *error is set to an errno
 * value on failure; on success it is left untouched (caller presets
 * it).  A v4-mapped IPv6 address is converted to plain IPv4 first.
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;
#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;

#endif
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Convert the v4-mapped address to plain IPv4. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* Endpoint not bound yet: this becomes the initial bind. */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* Check whether another endpoint already owns this address. */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* Address is free; add it to this endpoint. */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6961 
6962 /*
6963  * sctp_bindx(DELETE) for one address.
6964  * assumes all arguments are valid/checked by caller.
6965  */
/*
 * Validate the address against the endpoint's family/bind state and
 * remove it from the endpoint's address list.  *error is set to an
 * errno value on failure.  A v4-mapped IPv6 address is converted to
 * plain IPv4 before removal.
 */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;
#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;
#endif

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Convert the v4-mapped address to plain IPv4. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
7048 
7049 /*
7050  * returns the valid local address count for an assoc, taking into account
7051  * all scoping rules
7052  */
/*
 * returns the valid local address count for an assoc, taking into account
 * all scoping rules
 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope;
#if defined(INET)
	int ipv4_local_scope, ipv4_addr_legal;
#endif
#if defined (INET6)
	int local_scope, site_scope, ipv6_addr_legal;
#endif
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
	/* Hold the address list read lock while walking interfaces. */
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}

	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				/* Skip addresses restricted for this assoc. */
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = &sctp_ifa->address.sin;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						/* Skip addresses not visible in our jail. */
						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin->sin_addr) != 0) {
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = &sctp_ifa->address.sin6;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						/* Skip addresses not visible in our jail. */
						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin6->sin6_addr) != 0) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		/* Only the addresses explicitly bound to the endpoint. */
		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
7191 
7192 #if defined(SCTP_LOCAL_TRACE_BUF)
7193 
/*
 * Append one entry to the global circular trace log.  A slot is
 * claimed lock-free with a CAS loop on the shared index, so this is
 * safe to call from any context without additional locking.
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/* Claim the next index; retry if another CPU raced us. */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* Map the wrap-around case back to the first entry. */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
7219 
7220 #endif
/*
 * Input hook for SCTP-over-UDP tunneling: strip the UDP header from
 * the packet and feed the remaining IP+SCTP packet into the normal
 * SCTP input path, remembering the UDP source port so replies can be
 * tunneled back.  Consumes the mbuf in all cases.
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
    const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
{
	struct ip *iph;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	/*
	 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP
	 * checksum and it was valid. Since CSUM_DATA_VALID ==
	 * CSUM_SCTP_VALID this would imply that the HW also verified the
	 * SCTP checksum. Therefore, clear the bit.
	 */
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
	    m->m_pkthdr.len,
	    if_name(m->m_pkthdr.rcvif),
	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/* Shrink the IP length to account for the removed UDP header. */
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		/* Shrink the IPv6 payload length likewise. */
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}
7303 
7304 #ifdef INET
/*
 * ICMP error handler for SCTP-over-UDP (IPv4).  Parses the quoted
 * inner packet, verifies that it belongs to one of our associations
 * (matching UDP ports and either the peer verification tag or the
 * initiate tag of a quoted INIT chunk — this guards against spoofed
 * ICMP), then forwards the error to sctp_notify().
 */
static void
sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
{
	struct ip *outer_ip, *inner_ip;
	struct sctphdr *sh;
	struct icmp *icmp;
	struct udphdr *udp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_init_chunk *ch;
	struct sockaddr_in src, dst;
	uint8_t type, code;

	/* Recover the ICMP and outer IP headers surrounding the quotation. */
	inner_ip = (struct ip *)vip;
	icmp = (struct icmp *)((caddr_t)inner_ip -
	    (sizeof(struct icmp) - sizeof(struct ip)));
	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
	/* Enough quoted data for the inner IP, UDP and 8 bytes of SCTP? */
	if (ntohs(outer_ip->ip_len) <
	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
		return;
	}
	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
	sh = (struct sctphdr *)(udp + 1);
	memset(&src, 0, sizeof(struct sockaddr_in));
	src.sin_family = AF_INET;
	src.sin_len = sizeof(struct sockaddr_in);
	src.sin_port = sh->src_port;
	src.sin_addr = inner_ip->ip_src;
	memset(&dst, 0, sizeof(struct sockaddr_in));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(struct sockaddr_in);
	dst.sin_port = sh->dest_port;
	dst.sin_addr = inner_ip->ip_dst;
	/*
	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
	 * holds our local endpoint address. Thus we reverse the dst and the
	 * src in the lookup.
	 */
	inp = NULL;
	net = NULL;
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	    (struct sockaddr *)&src,
	    &inp, &net, 1,
	    SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/* Check the UDP port numbers */
		if ((udp->uh_dport != net->port) ||
		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh->v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider packets
			 * reflecting the verification tag.
			 */
			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			if (ntohs(outer_ip->ip_len) >=
			    sizeof(struct ip) +
			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
				/*
				 * In this case we can check if we got an
				 * INIT chunk and if the initiate tag
				 * matches.
				 */
				ch = (struct sctp_init_chunk *)(sh + 1);
				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = icmp->icmp_type;
		code = icmp->icmp_code;
		/* Treat a closed UDP encapsulation port like an unreachable protocol. */
		if ((type == ICMP_UNREACH) &&
		    (code == ICMP_UNREACH_PORT)) {
			code = ICMP_UNREACH_PROTOCOL;
		}
		sctp_notify(inp, stcb, net, type, code,
		    ntohs(inner_ip->ip_len),
		    (uint32_t)ntohs(icmp->icmp_nextmtu));
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
	return;
}
7412 #endif
7413 
7414 #ifdef INET6
/*
 * ICMPv6 error handler for SCTP-over-UDP (IPv6).  Like the IPv4
 * variant, it validates that the quoted packet belongs to one of our
 * associations (matching UDP ports and either the peer verification
 * tag or the initiate tag of a quoted INIT chunk) before forwarding
 * the error to sctp6_notify().
 */
static void
sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
{
	struct ip6ctlparam *ip6cp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctphdr sh;
	struct udphdr udp;
	struct sockaddr_in6 src, dst;
	uint8_t type, code;

	ip6cp = (struct ip6ctlparam *)d;
	/*
	 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid.
	 */
	if (ip6cp->ip6c_m == NULL) {
		return;
	}
	/*
	 * Check if we can safely examine the ports and the verification tag
	 * of the SCTP common header.
	 */
	if (ip6cp->ip6c_m->m_pkthdr.len <
	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
		return;
	}
	/* Copy out the UDP header. */
	memset(&udp, 0, sizeof(struct udphdr));
	m_copydata(ip6cp->ip6c_m,
	    ip6cp->ip6c_off,
	    sizeof(struct udphdr),
	    (caddr_t)&udp);
	/* Copy out the port numbers and the verification tag. */
	memset(&sh, 0, sizeof(struct sctphdr));
	m_copydata(ip6cp->ip6c_m,
	    ip6cp->ip6c_off + sizeof(struct udphdr),
	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
	    (caddr_t)&sh);
	/* Build the peer (src) address from the quoted inner packet. */
	memset(&src, 0, sizeof(struct sockaddr_in6));
	src.sin6_family = AF_INET6;
	src.sin6_len = sizeof(struct sockaddr_in6);
	src.sin6_port = sh.src_port;
	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
	/* Build the local (dst) address from the quoted inner packet. */
	memset(&dst, 0, sizeof(struct sockaddr_in6));
	dst.sin6_family = AF_INET6;
	dst.sin6_len = sizeof(struct sockaddr_in6);
	dst.sin6_port = sh.dest_port;
	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
	inp = NULL;
	net = NULL;
	/* src and dst are reversed: dst was the destination of our packet. */
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	    (struct sockaddr *)&src,
	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/* Check the UDP port numbers */
		if ((udp.uh_dport != net->port) ||
		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh.v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider packets
			 * reflecting the verification tag.
			 */
			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			if (ip6cp->ip6c_m->m_pkthdr.len >=
			    ip6cp->ip6c_off + sizeof(struct udphdr) +
			    sizeof(struct sctphdr) +
			    sizeof(struct sctp_chunkhdr) +
			    offsetof(struct sctp_init, a_rwnd)) {
				/*
				 * In this case we can check if we got an
				 * INIT chunk and if the initiate tag
				 * matches.
				 */
				uint32_t initiate_tag;
				uint8_t chunk_type;

				m_copydata(ip6cp->ip6c_m,
				    ip6cp->ip6c_off +
				    sizeof(struct udphdr) +
				    sizeof(struct sctphdr),
				    sizeof(uint8_t),
				    (caddr_t)&chunk_type);
				m_copydata(ip6cp->ip6c_m,
				    ip6cp->ip6c_off +
				    sizeof(struct udphdr) +
				    sizeof(struct sctphdr) +
				    sizeof(struct sctp_chunkhdr),
				    sizeof(uint32_t),
				    (caddr_t)&initiate_tag);
				if ((chunk_type != SCTP_INITIATION) ||
				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = ip6cp->ip6c_icmp6->icmp6_type;
		code = ip6cp->ip6c_icmp6->icmp6_code;
		/* Treat a closed UDP encapsulation port like a bad next header. */
		if ((type == ICMP6_DST_UNREACH) &&
		    (code == ICMP6_DST_UNREACH_NOPORT)) {
			type = ICMP6_PARAM_PROB;
			code = ICMP6_PARAMPROB_NEXTHEADER;
		}
		sctp6_notify(inp, stcb, net, type, code,
		    ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce inp's ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
}
7553 #endif
7554 
/*
 * Tear down the SCTP-over-UDP tunneling sockets (both address
 * families), if they are open.
 */
void
sctp_over_udp_stop(void)
{
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp4_tun_socket));
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp6_tun_socket));
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}
7575 
/*
 * Bring up SCTP-over-UDP tunneling: create kernel UDP sockets for the
 * enabled address families, install the tunneling packet and ICMP
 * hooks, and bind them to the configured tunneling port.  On any
 * failure all partially-created sockets are torn down again via
 * sctp_over_udp_stop().  Returns 0 or an errno value.
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}
7662 
7663 /*
7664  * sctp_min_mtu ()returns the minimum of all non-zero arguments.
7665  * If all arguments are zero, zero is returned.
7666  */
7667 uint32_t
7668 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
7669 {
7670 	if (mtu1 > 0) {
7671 		if (mtu2 > 0) {
7672 			if (mtu3 > 0) {
7673 				return (min(mtu1, min(mtu2, mtu3)));
7674 			} else {
7675 				return (min(mtu1, mtu2));
7676 			}
7677 		} else {
7678 			if (mtu3 > 0) {
7679 				return (min(mtu1, mtu3));
7680 			} else {
7681 				return (mtu1);
7682 			}
7683 		}
7684 	} else {
7685 		if (mtu2 > 0) {
7686 			if (mtu3 > 0) {
7687 				return (min(mtu2, mtu3));
7688 			} else {
7689 				return (mtu2);
7690 			}
7691 		} else {
7692 			return (mtu3);
7693 		}
7694 	}
7695 }
7696 
/*
 * Store the discovered path MTU for the given peer address in the TCP
 * host cache (keyed by FIB number and foreign address).  Unknown
 * address families are silently ignored.
 */
void
sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
{
	struct in_conninfo inc;

	memset(&inc, 0, sizeof(struct in_conninfo));
	inc.inc_fibnum = fibnum;
	switch (addr->sa.sa_family) {
#ifdef INET
	case AF_INET:
		inc.inc_faddr = addr->sin.sin_addr;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		inc.inc_flags |= INC_ISIPV6;
		inc.inc6_faddr = addr->sin6.sin6_addr;
		break;
#endif
	default:
		/* Unsupported family: nothing to cache. */
		return;
	}
	tcp_hc_updatemtu(&inc, (u_long)mtu);
}
7721 
7722 uint32_t
7723 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
7724 {
7725 	struct in_conninfo inc;
7726 
7727 	memset(&inc, 0, sizeof(struct in_conninfo));
7728 	inc.inc_fibnum = fibnum;
7729 	switch (addr->sa.sa_family) {
7730 #ifdef INET
7731 	case AF_INET:
7732 		inc.inc_faddr = addr->sin.sin_addr;
7733 		break;
7734 #endif
7735 #ifdef INET6
7736 	case AF_INET6:
7737 		inc.inc_flags |= INC_ISIPV6;
7738 		inc.inc6_faddr = addr->sin6.sin6_addr;
7739 		break;
7740 #endif
7741 	default:
7742 		return (0);
7743 	}
7744 	return ((uint32_t)tcp_hc_getmtu(&inc));
7745 }
7746 
7747 void
7748 sctp_set_state(struct sctp_tcb *stcb, int new_state)
7749 {
7750 #if defined(KDTRACE_HOOKS)
7751 	int old_state = stcb->asoc.state;
7752 #endif
7753 
7754 	KASSERT((new_state & ~SCTP_STATE_MASK) == 0,
7755 	    ("sctp_set_state: Can't set substate (new_state = %x)",
7756 	    new_state));
7757 	stcb->asoc.state = (stcb->asoc.state & ~SCTP_STATE_MASK) | new_state;
7758 	if ((new_state == SCTP_STATE_SHUTDOWN_RECEIVED) ||
7759 	    (new_state == SCTP_STATE_SHUTDOWN_SENT) ||
7760 	    (new_state == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
7761 		SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
7762 	}
7763 #if defined(KDTRACE_HOOKS)
7764 	if (((old_state & SCTP_STATE_MASK) != new_state) &&
7765 	    !(((old_state & SCTP_STATE_MASK) == SCTP_STATE_EMPTY) &&
7766 	    (new_state == SCTP_STATE_INUSE))) {
7767 		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
7768 	}
7769 #endif
7770 }
7771 
7772 void
7773 sctp_add_substate(struct sctp_tcb *stcb, int substate)
7774 {
7775 #if defined(KDTRACE_HOOKS)
7776 	int old_state = stcb->asoc.state;
7777 #endif
7778 
7779 	KASSERT((substate & SCTP_STATE_MASK) == 0,
7780 	    ("sctp_add_substate: Can't set state (substate = %x)",
7781 	    substate));
7782 	stcb->asoc.state |= substate;
7783 #if defined(KDTRACE_HOOKS)
7784 	if (((substate & SCTP_STATE_ABOUT_TO_BE_FREED) &&
7785 	    ((old_state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) ||
7786 	    ((substate & SCTP_STATE_SHUTDOWN_PENDING) &&
7787 	    ((old_state & SCTP_STATE_SHUTDOWN_PENDING) == 0))) {
7788 		SCTP_PROBE6(state__change, NULL, stcb, NULL, stcb, NULL, old_state);
7789 	}
7790 #endif
7791 }
7792