xref: /freebsd/sys/netinet/sctputil.c (revision 81ea85a8845662ca329a954eeeb3e6d4124282a2)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #include <netinet6/sctp6_var.h>
45 #endif
46 #include <netinet/sctp_header.h>
47 #include <netinet/sctp_output.h>
48 #include <netinet/sctp_uio.h>
49 #include <netinet/sctp_timer.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_auth.h>
52 #include <netinet/sctp_asconf.h>
53 #include <netinet/sctp_bsd_addr.h>
54 #if defined(INET6) || defined(INET)
55 #include <netinet/tcp_var.h>
56 #endif
57 #include <netinet/udp.h>
58 #include <netinet/udp_var.h>
59 #include <sys/proc.h>
60 #ifdef INET6
61 #include <netinet/icmp6.h>
62 #endif
63 
64 
65 #ifndef KTR_SCTP
66 #define KTR_SCTP KTR_SUBSYS
67 #endif
68 
69 extern const struct sctp_cc_functions sctp_cc_functions[];
70 extern const struct sctp_ss_functions sctp_ss_functions[];
71 
/*
 * Log a socket-buffer accounting event (socket sb_cc vs. the association's
 * sb_cc) to the local KTR trace buffer.  'incr' is the byte delta being
 * applied, 'from' identifies the call site.
 */
void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Zero first: the trace macro below reads the union through
	 * x.misc.log1..log4, which may cover bytes the x.sb member never
	 * writes.  Matches rto_logging()/sctp_log_map() in this file.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
94 
/*
 * Trace an endpoint/association close event.  'stcb' may be NULL (e.g.
 * endpoint-only teardown); 'loc' identifies the call site.
 */
void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so the misc.logN words read below are fully defined. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.close.inp = (void *)inp;
	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
	if (stcb) {
		sctp_clog.x.close.stcb = (void *)stcb;
		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
	} else {
		sctp_clog.x.close.stcb = 0;
		sctp_clog.x.close.state = 0;
	}
	sctp_clog.x.close.loc = loc;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CLOSE,
	    0,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
120 
/*
 * Trace the current smoothed RTT (in milliseconds) for a destination.
 */
void
rto_logging(struct sctp_nets *net, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log clog;

	memset(&clog, 0, sizeof(clog));
	clog.x.rto.net = (void *)net;
	/* net->rtt is kept in microseconds; log milliseconds. */
	clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT, from,
	    clog.x.misc.log1, clog.x.misc.log2,
	    clog.x.misc.log3, clog.x.misc.log4);
#endif
}
139 
/*
 * Trace a stream-queue event identified only by TSN/SSN/stream number
 * (no queued-to-read control block available, cf. sctp_log_strm_del()).
 */
void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so the misc.logN words read below are fully defined. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.strlog.stcb = stcb;
	sctp_clog.x.strlog.n_tsn = tsn;
	sctp_clog.x.strlog.n_sseq = sseq;
	sctp_clog.x.strlog.e_tsn = 0;
	sctp_clog.x.strlog.e_sseq = 0;
	sctp_clog.x.strlog.strm = stream;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
161 
/*
 * Trace a Nagle decision ('action') with the association's current
 * flight and queue counters.
 */
void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so the misc.logN words read below are fully defined. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
182 
/*
 * Trace SACK processing: old vs. new cumulative ack, the TSN being
 * examined, and the number of gap-ack and duplicate-TSN blocks.
 */
void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so the misc.logN words read below are fully defined. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.sack.cumack = cumack;
	sctp_clog.x.sack.oldcumack = old_cumack;
	sctp_clog.x.sack.tsn = tsn;
	sctp_clog.x.sack.numGaps = gaps;
	sctp_clog.x.sack.numDups = dups;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SACK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
203 
/*
 * Trace a mapping-array snapshot: array base TSN, cumulative-ack point,
 * and highest TSN seen.
 */
void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log clog;

	memset(&clog, 0, sizeof(clog));
	clog.x.map.base = map;
	clog.x.map.cum = cum;
	clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP, from,
	    clog.x.misc.log1, clog.x.misc.log2,
	    clog.x.misc.log3, clog.x.misc.log4);
#endif
}
223 
/*
 * Trace a fast-retransmit decision: the biggest TSN seen, the biggest
 * newly-acked TSN, and the TSN under consideration.
 */
void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log clog;

	memset(&clog, 0, sizeof(clog));
	clog.x.fr.largest_tsn = biggest_tsn;
	clog.x.fr.largest_new_tsn = biggest_new_tsn;
	clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR, from,
	    clog.x.misc.log1, clog.x.misc.log2,
	    clog.x.misc.log3, clog.x.misc.log4);
#endif
}
243 
244 #ifdef SCTP_MBUF_LOGGING
/*
 * Trace one mbuf: flags, length, data pointer and — for external-storage
 * mbufs — the external buffer base and its reference count.
 */
void
sctp_log_mb(struct mbuf *m, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so the misc.logN words read below are fully defined. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
271 
272 void
273 sctp_log_mbc(struct mbuf *m, int from)
274 {
275 	struct mbuf *mat;
276 
277 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
278 		sctp_log_mb(mat, from);
279 	}
280 }
281 #endif
282 
/*
 * Trace a stream-queue delivery event for 'control', optionally paired
 * with the queue entry 'poschk' it is being compared/ordered against.
 */
void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	/* Zero first so the misc.logN words read below are fully defined. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
313 
/*
 * Trace a congestion-window event.  'net' may be NULL; in that case only
 * the association-level queue counters are recorded.
 */
void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Zero first: when net is NULL the per-net fields below are never
	 * written, yet the trace macro reads the whole union via misc.logN.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

	if (net) {
		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
		sctp_clog.x.cwnd.inflight = net->flight_size;
		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
	}
	if (SCTP_CWNDLOG_PRESEND == from) {
		/* Pre-send events reuse this slot for the peer's rwnd. */
		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
	}
	sctp_clog.x.cwnd.cwnd_augment = augment;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
350 
/*
 * Snapshot the ownership state of every lock relevant to this
 * endpoint/association (tcb, inp, create, global info, socket buffers)
 * into one trace record.  Both 'inp' and 'stcb' may be NULL; missing
 * locks are recorded as SCTP_LOCK_UNKNOWN.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log clog;

	memset(&clog, 0, sizeof(clog));
	clog.x.lock.sock = (inp != NULL) ? (void *)inp->sctp_socket : (void *)NULL;
	clog.x.lock.inp = (void *)inp;
	clog.x.lock.tcb_lock = (stcb != NULL) ?
	    mtx_owned(&stcb->tcb_mtx) : SCTP_LOCK_UNKNOWN;
	if (inp != NULL) {
		clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
		struct socket *so = inp->sctp_socket;

		/*
		 * NOTE(review): sock_lock is sampled from so_rcv.sb_mtx,
		 * i.e. the same mutex as sockrcvbuf_lock.
		 */
		clog.x.lock.sock_lock = mtx_owned(&so->so_rcv.sb_mtx);
		clog.x.lock.sockrcvbuf_lock = mtx_owned(&so->so_rcv.sb_mtx);
		clog.x.lock.socksndbuf_lock = mtx_owned(&so->so_snd.sb_mtx);
	} else {
		clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT, from,
	    clog.x.misc.log1, clog.x.misc.log2,
	    clog.x.misc.log3, clog.x.misc.log4);
#endif
}
396 
/*
 * Trace a max-burst limiting event: the error/reason code, the burst
 * value applied, and the destination's flight plus queue counters.
 */
void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log clog;

	memset(&clog, 0, sizeof(clog));
	clog.x.cwnd.net = net;
	clog.x.cwnd.cwnd_new_value = error;
	clog.x.cwnd.inflight = net->flight_size;
	clog.x.cwnd.cwnd_augment = burst;
	/* Queue counters are clamped to fit their 8-bit log fields. */
	clog.x.cwnd.cnt_in_send = (stcb->asoc.send_queue_cnt > 255) ?
	    255 : stcb->asoc.send_queue_cnt;
	clog.x.cwnd.cnt_in_str = (stcb->asoc.stream_queue_cnt > 255) ?
	    255 : stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST, from,
	    clog.x.misc.log1, clog.x.misc.log2,
	    clog.x.misc.log3, clog.x.misc.log4);
#endif
}
425 
/*
 * Trace a receive-window computation (no resulting rwnd value recorded;
 * cf. sctp_log_rwnd_set()).
 */
void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so the misc.logN words read below are fully defined. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = snd_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = 0;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
445 
/*
 * Trace a receive-window update that produced a new rwnd value
 * ('a_rwndval'), along with the inputs used to compute it.
 */
void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so the misc.logN words read below are fully defined. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = flight_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
465 
466 #ifdef SCTP_MBCNT_LOGGING
/*
 * Trace output-queue byte/mbuf accounting: current totals plus the
 * deltas ('book', 'mbcnt') being applied.
 */
static void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so the misc.logN words read below are fully defined. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.mbcnt.total_queue_size = total_oq;
	sctp_clog.x.mbcnt.size_change = book;
	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBCNT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
486 #endif
487 
/*
 * Log four caller-supplied 32-bit values as a generic "misc" trace event.
 * Unlike the other loggers in this file, the values are passed straight
 * through to the trace macro without staging them in a sctp_cwnd_log.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
#endif
}
498 
/*
 * Trace a socket-wakeup event: queue/flight counters, the endpoint's
 * deferred-wakeup flags (packed into sctpflags), and the low byte of the
 * send buffer's sb_flags.
 */
void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so the misc.logN words read below are fully defined. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.wake.stcb = (void *)stcb;
	sctp_clog.x.wake.wake_cnt = wake_cnt;
	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

	/* Clamp the queue counters to fit their 8-bit log fields. */
	if (stcb->asoc.stream_queue_cnt < 0xff)
		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
	else
		sctp_clog.x.wake.stream_qcnt = 0xff;

	if (stcb->asoc.chunks_on_out_queue < 0xff)
		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
	else
		sctp_clog.x.wake.chunks_on_oque = 0xff;

	sctp_clog.x.wake.sctpflags = 0;
	/* set in the deferred mode stuff */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
		sctp_clog.x.wake.sctpflags |= 1;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
		sctp_clog.x.wake.sctpflags |= 2;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
		sctp_clog.x.wake.sctpflags |= 4;
	/* what about the sb */
	if (stcb->sctp_socket) {
		struct socket *so = stcb->sctp_socket;

		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
	} else {
		/* 0xff marks "no socket attached". */
		sctp_clog.x.wake.sbflags = 0xff;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_WAKE,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
546 
/*
 * Trace a sender-blocked event: what is queued, the peer's rwnd, flight
 * size (in KB) and the size of the send that is blocking.
 */
void
sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
{
#if defined(SCTP_LOCAL_TRACE_BUF)
	struct sctp_cwnd_log sctp_clog;

	/* Zero first so the misc.logN words read below are fully defined. */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_BLOCK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
#endif
}
569 
/*
 * Stub for the stat-log socket option: always reports success without
 * touching 'optval'/'optsize'; trace data is expected to be retrieved
 * with ktrdump instead.
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
576 
577 #ifdef SCTP_AUDITING_ENABLED
578 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
579 static int sctp_audit_indx = 0;
580 
581 static
582 void
583 sctp_print_audit_report(void)
584 {
585 	int i;
586 	int cnt;
587 
588 	cnt = 0;
589 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
590 		if ((sctp_audit_data[i][0] == 0xe0) &&
591 		    (sctp_audit_data[i][1] == 0x01)) {
592 			cnt = 0;
593 			SCTP_PRINTF("\n");
594 		} else if (sctp_audit_data[i][0] == 0xf0) {
595 			cnt = 0;
596 			SCTP_PRINTF("\n");
597 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
598 		    (sctp_audit_data[i][1] == 0x01)) {
599 			SCTP_PRINTF("\n");
600 			cnt = 0;
601 		}
602 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
603 		    (uint32_t)sctp_audit_data[i][1]);
604 		cnt++;
605 		if ((cnt % 14) == 0)
606 			SCTP_PRINTF("\n");
607 	}
608 	for (i = 0; i < sctp_audit_indx; i++) {
609 		if ((sctp_audit_data[i][0] == 0xe0) &&
610 		    (sctp_audit_data[i][1] == 0x01)) {
611 			cnt = 0;
612 			SCTP_PRINTF("\n");
613 		} else if (sctp_audit_data[i][0] == 0xf0) {
614 			cnt = 0;
615 			SCTP_PRINTF("\n");
616 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
617 		    (sctp_audit_data[i][1] == 0x01)) {
618 			SCTP_PRINTF("\n");
619 			cnt = 0;
620 		}
621 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
622 		    (uint32_t)sctp_audit_data[i][1]);
623 		cnt++;
624 		if ((cnt % 14) == 0)
625 			SCTP_PRINTF("\n");
626 	}
627 	SCTP_PRINTF("\n");
628 }
629 
630 void
631 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
632     struct sctp_nets *net)
633 {
634 	int resend_cnt, tot_out, rep, tot_book_cnt;
635 	struct sctp_nets *lnet;
636 	struct sctp_tmit_chunk *chk;
637 
638 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
639 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
640 	sctp_audit_indx++;
641 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
642 		sctp_audit_indx = 0;
643 	}
644 	if (inp == NULL) {
645 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
646 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
647 		sctp_audit_indx++;
648 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
649 			sctp_audit_indx = 0;
650 		}
651 		return;
652 	}
653 	if (stcb == NULL) {
654 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
655 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
656 		sctp_audit_indx++;
657 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
658 			sctp_audit_indx = 0;
659 		}
660 		return;
661 	}
662 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
663 	sctp_audit_data[sctp_audit_indx][1] =
664 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
665 	sctp_audit_indx++;
666 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
667 		sctp_audit_indx = 0;
668 	}
669 	rep = 0;
670 	tot_book_cnt = 0;
671 	resend_cnt = tot_out = 0;
672 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
673 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
674 			resend_cnt++;
675 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
676 			tot_out += chk->book_size;
677 			tot_book_cnt++;
678 		}
679 	}
680 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
681 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
682 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
683 		sctp_audit_indx++;
684 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
685 			sctp_audit_indx = 0;
686 		}
687 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
688 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
689 		rep = 1;
690 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
691 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
692 		sctp_audit_data[sctp_audit_indx][1] =
693 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
694 		sctp_audit_indx++;
695 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
696 			sctp_audit_indx = 0;
697 		}
698 	}
699 	if (tot_out != stcb->asoc.total_flight) {
700 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
701 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
702 		sctp_audit_indx++;
703 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
704 			sctp_audit_indx = 0;
705 		}
706 		rep = 1;
707 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
708 		    (int)stcb->asoc.total_flight);
709 		stcb->asoc.total_flight = tot_out;
710 	}
711 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
712 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
713 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
714 		sctp_audit_indx++;
715 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
716 			sctp_audit_indx = 0;
717 		}
718 		rep = 1;
719 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
720 
721 		stcb->asoc.total_flight_count = tot_book_cnt;
722 	}
723 	tot_out = 0;
724 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
725 		tot_out += lnet->flight_size;
726 	}
727 	if (tot_out != stcb->asoc.total_flight) {
728 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
729 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
730 		sctp_audit_indx++;
731 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
732 			sctp_audit_indx = 0;
733 		}
734 		rep = 1;
735 		SCTP_PRINTF("real flight:%d net total was %d\n",
736 		    stcb->asoc.total_flight, tot_out);
737 		/* now corrective action */
738 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
739 
740 			tot_out = 0;
741 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
742 				if ((chk->whoTo == lnet) &&
743 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
744 					tot_out += chk->book_size;
745 				}
746 			}
747 			if (lnet->flight_size != tot_out) {
748 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
749 				    (void *)lnet, lnet->flight_size,
750 				    tot_out);
751 				lnet->flight_size = tot_out;
752 			}
753 		}
754 	}
755 	if (rep) {
756 		sctp_print_audit_report();
757 	}
758 }
759 
760 void
761 sctp_audit_log(uint8_t ev, uint8_t fd)
762 {
763 
764 	sctp_audit_data[sctp_audit_indx][0] = ev;
765 	sctp_audit_data[sctp_audit_indx][1] = fd;
766 	sctp_audit_indx++;
767 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
768 		sctp_audit_indx = 0;
769 	}
770 }
771 
772 #endif
773 
774 /*
775  * sctp_stop_timers_for_shutdown() should be called
776  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
777  * state to make sure that all timers are stopped.
778  */
779 void
780 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
781 {
782 	struct sctp_association *asoc;
783 	struct sctp_nets *net;
784 
785 	asoc = &stcb->asoc;
786 
787 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
788 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
789 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
790 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
791 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
792 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
793 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
794 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
795 	}
796 }
797 
798 /*
799  * A list of sizes based on typical mtu's, used only if next hop size not
800  * returned. These values MUST be multiples of 4 and MUST be ordered.
801  */
static uint32_t sctp_mtu_sizes[] = {
	/*
	 * NOTE(review): these look like the RFC 1191 plateau values rounded
	 * down to multiples of 4 (e.g. 1004, 2000, 17912, 65532), mixed with
	 * common link MTUs (1492 PPPoE, 1500 Ethernet) -- origin presumed,
	 * confirm before relying on it.
	 */
	68,
	296,
	508,
	512,
	544,
	576,
	1004,
	1492,
	1500,
	1536,
	2000,
	2048,
	4352,
	4464,
	8166,
	17912,
	32000,
	65532
};
822 
823 /*
824  * Return the largest MTU in sctp_mtu_sizes smaller than val.
825  * If val is smaller than the minimum, just return the largest
826  * multiple of 4 smaller or equal to val.
827  * Ensure that the result is a multiple of 4.
828  */
829 uint32_t
830 sctp_get_prev_mtu(uint32_t val)
831 {
832 	uint32_t i;
833 
834 	val &= 0xfffffffc;
835 	if (val <= sctp_mtu_sizes[0]) {
836 		return (val);
837 	}
838 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
839 		if (val <= sctp_mtu_sizes[i]) {
840 			break;
841 		}
842 	}
843 	KASSERT((sctp_mtu_sizes[i - 1] & 0x00000003) == 0,
844 	    ("sctp_mtu_sizes[%u] not a multiple of 4", i - 1));
845 	return (sctp_mtu_sizes[i - 1]);
846 }
847 
848 /*
849  * Return the smallest MTU in sctp_mtu_sizes larger than val.
850  * If val is larger than the maximum, just return the largest multiple of 4 smaller
851  * or equal to val.
852  * Ensure that the result is a multiple of 4.
853  */
854 uint32_t
855 sctp_get_next_mtu(uint32_t val)
856 {
857 	/* select another MTU that is just bigger than this one */
858 	uint32_t i;
859 
860 	val &= 0xfffffffc;
861 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
862 		if (val < sctp_mtu_sizes[i]) {
863 			KASSERT((sctp_mtu_sizes[i] & 0x00000003) == 0,
864 			    ("sctp_mtu_sizes[%u] not a multiple of 4", i));
865 			return (sctp_mtu_sizes[i]);
866 		}
867 	}
868 	return (val);
869 }
870 
/*
 * Refill the endpoint's random_store with a fresh batch of pseudo-random
 * bytes and reset the read position (store_at) to the beginning.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	/* HMAC the seed with the counter to derive this batch of bytes. */
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
	    sizeof(m->random_counter), (uint8_t *)m->random_store);
	/* Advance the counter so the next refill differs from this one. */
	m->random_counter++;
}
889 
890 uint32_t
891 sctp_select_initial_TSN(struct sctp_pcb *inp)
892 {
893 	/*
894 	 * A true implementation should use random selection process to get
895 	 * the initial stream sequence number, using RFC1750 as a good
896 	 * guideline
897 	 */
898 	uint32_t x, *xp;
899 	uint8_t *p;
900 	int store_at, new_store;
901 
902 	if (inp->initial_sequence_debug != 0) {
903 		uint32_t ret;
904 
905 		ret = inp->initial_sequence_debug;
906 		inp->initial_sequence_debug++;
907 		return (ret);
908 	}
909 retry:
910 	store_at = inp->store_at;
911 	new_store = store_at + sizeof(uint32_t);
912 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
913 		new_store = 0;
914 	}
915 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
916 		goto retry;
917 	}
918 	if (new_store == 0) {
919 		/* Refill the random store */
920 		sctp_fill_random_store(inp);
921 	}
922 	p = &inp->random_store[store_at];
923 	xp = (uint32_t *)p;
924 	x = *xp;
925 	return (x);
926 }
927 
928 uint32_t
929 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
930 {
931 	uint32_t x;
932 	struct timeval now;
933 
934 	if (check) {
935 		(void)SCTP_GETTIME_TIMEVAL(&now);
936 	}
937 	for (;;) {
938 		x = sctp_select_initial_TSN(&inp->sctp_ep);
939 		if (x == 0) {
940 			/* we never use 0 */
941 			continue;
942 		}
943 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
944 			break;
945 		}
946 	}
947 	return (x);
948 }
949 
950 int32_t
951 sctp_map_assoc_state(int kernel_state)
952 {
953 	int32_t user_state;
954 
955 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
956 		user_state = SCTP_CLOSED;
957 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
958 		user_state = SCTP_SHUTDOWN_PENDING;
959 	} else {
960 		switch (kernel_state & SCTP_STATE_MASK) {
961 		case SCTP_STATE_EMPTY:
962 			user_state = SCTP_CLOSED;
963 			break;
964 		case SCTP_STATE_INUSE:
965 			user_state = SCTP_CLOSED;
966 			break;
967 		case SCTP_STATE_COOKIE_WAIT:
968 			user_state = SCTP_COOKIE_WAIT;
969 			break;
970 		case SCTP_STATE_COOKIE_ECHOED:
971 			user_state = SCTP_COOKIE_ECHOED;
972 			break;
973 		case SCTP_STATE_OPEN:
974 			user_state = SCTP_ESTABLISHED;
975 			break;
976 		case SCTP_STATE_SHUTDOWN_SENT:
977 			user_state = SCTP_SHUTDOWN_SENT;
978 			break;
979 		case SCTP_STATE_SHUTDOWN_RECEIVED:
980 			user_state = SCTP_SHUTDOWN_RECEIVED;
981 			break;
982 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
983 			user_state = SCTP_SHUTDOWN_ACK_SENT;
984 			break;
985 		default:
986 			user_state = SCTP_CLOSED;
987 			break;
988 		}
989 	}
990 	return (user_state);
991 }
992 
993 int
994 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
995     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
996 {
997 	struct sctp_association *asoc;
998 
999 	/*
1000 	 * Anything set to zero is taken care of by the allocation routine's
1001 	 * bzero
1002 	 */
1003 
1004 	/*
1005 	 * Up front select what scoping to apply on addresses I tell my peer
1006 	 * Not sure what to do with these right now, we will need to come up
1007 	 * with a way to set them. We may need to pass them through from the
1008 	 * caller in the sctp_aloc_assoc() function.
1009 	 */
1010 	int i;
1011 #if defined(SCTP_DETAILED_STR_STATS)
1012 	int j;
1013 #endif
1014 
1015 	asoc = &stcb->asoc;
1016 	/* init all variables to a known value. */
1017 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
1018 	asoc->max_burst = inp->sctp_ep.max_burst;
1019 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
1020 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
1021 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
1022 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
1023 	asoc->ecn_supported = inp->ecn_supported;
1024 	asoc->prsctp_supported = inp->prsctp_supported;
1025 	asoc->idata_supported = inp->idata_supported;
1026 	asoc->auth_supported = inp->auth_supported;
1027 	asoc->asconf_supported = inp->asconf_supported;
1028 	asoc->reconfig_supported = inp->reconfig_supported;
1029 	asoc->nrsack_supported = inp->nrsack_supported;
1030 	asoc->pktdrop_supported = inp->pktdrop_supported;
1031 	asoc->idata_supported = inp->idata_supported;
1032 	asoc->sctp_cmt_pf = (uint8_t)0;
1033 	asoc->sctp_frag_point = inp->sctp_frag_point;
1034 	asoc->sctp_features = inp->sctp_features;
1035 	asoc->default_dscp = inp->sctp_ep.default_dscp;
1036 	asoc->max_cwnd = inp->max_cwnd;
1037 #ifdef INET6
1038 	if (inp->sctp_ep.default_flowlabel) {
1039 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
1040 	} else {
1041 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
1042 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
1043 			asoc->default_flowlabel &= 0x000fffff;
1044 			asoc->default_flowlabel |= 0x80000000;
1045 		} else {
1046 			asoc->default_flowlabel = 0;
1047 		}
1048 	}
1049 #endif
1050 	asoc->sb_send_resv = 0;
1051 	if (override_tag) {
1052 		asoc->my_vtag = override_tag;
1053 	} else {
1054 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1055 	}
1056 	/* Get the nonce tags */
1057 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1058 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1059 	asoc->vrf_id = vrf_id;
1060 
1061 #ifdef SCTP_ASOCLOG_OF_TSNS
1062 	asoc->tsn_in_at = 0;
1063 	asoc->tsn_out_at = 0;
1064 	asoc->tsn_in_wrapped = 0;
1065 	asoc->tsn_out_wrapped = 0;
1066 	asoc->cumack_log_at = 0;
1067 	asoc->cumack_log_atsnt = 0;
1068 #endif
1069 #ifdef SCTP_FS_SPEC_LOG
1070 	asoc->fs_index = 0;
1071 #endif
1072 	asoc->refcnt = 0;
1073 	asoc->assoc_up_sent = 0;
1074 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1075 	    sctp_select_initial_TSN(&inp->sctp_ep);
1076 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1077 	/* we are optimisitic here */
1078 	asoc->peer_supports_nat = 0;
1079 	asoc->sent_queue_retran_cnt = 0;
1080 
1081 	/* for CMT */
1082 	asoc->last_net_cmt_send_started = NULL;
1083 
1084 	/* This will need to be adjusted */
1085 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1086 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1087 	asoc->asconf_seq_in = asoc->last_acked_seq;
1088 
1089 	/* here we are different, we hold the next one we expect */
1090 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1091 
1092 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1093 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1094 
1095 	asoc->default_mtu = inp->sctp_ep.default_mtu;
1096 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1097 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1098 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1099 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1100 	asoc->free_chunk_cnt = 0;
1101 
1102 	asoc->iam_blocking = 0;
1103 	asoc->context = inp->sctp_context;
1104 	asoc->local_strreset_support = inp->local_strreset_support;
1105 	asoc->def_send = inp->def_send;
1106 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1107 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1108 	asoc->pr_sctp_cnt = 0;
1109 	asoc->total_output_queue_size = 0;
1110 
1111 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1112 		asoc->scope.ipv6_addr_legal = 1;
1113 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1114 			asoc->scope.ipv4_addr_legal = 1;
1115 		} else {
1116 			asoc->scope.ipv4_addr_legal = 0;
1117 		}
1118 	} else {
1119 		asoc->scope.ipv6_addr_legal = 0;
1120 		asoc->scope.ipv4_addr_legal = 1;
1121 	}
1122 
1123 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1124 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1125 
1126 	asoc->smallest_mtu = inp->sctp_frag_point;
1127 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1128 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1129 
1130 	asoc->stream_locked_on = 0;
1131 	asoc->ecn_echo_cnt_onq = 0;
1132 	asoc->stream_locked = 0;
1133 
1134 	asoc->send_sack = 1;
1135 
1136 	LIST_INIT(&asoc->sctp_restricted_addrs);
1137 
1138 	TAILQ_INIT(&asoc->nets);
1139 	TAILQ_INIT(&asoc->pending_reply_queue);
1140 	TAILQ_INIT(&asoc->asconf_ack_sent);
1141 	/* Setup to fill the hb random cache at first HB */
1142 	asoc->hb_random_idx = 4;
1143 
1144 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1145 
1146 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1147 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1148 
1149 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1150 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1151 
1152 	/*
1153 	 * Now the stream parameters, here we allocate space for all streams
1154 	 * that we request by default.
1155 	 */
1156 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1157 	    o_strms;
1158 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1159 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1160 	    SCTP_M_STRMO);
1161 	if (asoc->strmout == NULL) {
1162 		/* big trouble no memory */
1163 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1164 		return (ENOMEM);
1165 	}
1166 	for (i = 0; i < asoc->streamoutcnt; i++) {
1167 		/*
1168 		 * inbound side must be set to 0xffff, also NOTE when we get
1169 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1170 		 * count (streamoutcnt) but first check if we sent to any of
1171 		 * the upper streams that were dropped (if some were). Those
1172 		 * that were dropped must be notified to the upper layer as
1173 		 * failed to send.
1174 		 */
1175 		asoc->strmout[i].next_mid_ordered = 0;
1176 		asoc->strmout[i].next_mid_unordered = 0;
1177 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1178 		asoc->strmout[i].chunks_on_queues = 0;
1179 #if defined(SCTP_DETAILED_STR_STATS)
1180 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1181 			asoc->strmout[i].abandoned_sent[j] = 0;
1182 			asoc->strmout[i].abandoned_unsent[j] = 0;
1183 		}
1184 #else
1185 		asoc->strmout[i].abandoned_sent[0] = 0;
1186 		asoc->strmout[i].abandoned_unsent[0] = 0;
1187 #endif
1188 		asoc->strmout[i].sid = i;
1189 		asoc->strmout[i].last_msg_incomplete = 0;
1190 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1191 		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1192 	}
1193 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1194 
1195 	/* Now the mapping array */
1196 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1197 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1198 	    SCTP_M_MAP);
1199 	if (asoc->mapping_array == NULL) {
1200 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1201 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1202 		return (ENOMEM);
1203 	}
1204 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1205 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1206 	    SCTP_M_MAP);
1207 	if (asoc->nr_mapping_array == NULL) {
1208 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1209 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1210 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1211 		return (ENOMEM);
1212 	}
1213 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1214 
1215 	/* Now the init of the other outqueues */
1216 	TAILQ_INIT(&asoc->free_chunks);
1217 	TAILQ_INIT(&asoc->control_send_queue);
1218 	TAILQ_INIT(&asoc->asconf_send_queue);
1219 	TAILQ_INIT(&asoc->send_queue);
1220 	TAILQ_INIT(&asoc->sent_queue);
1221 	TAILQ_INIT(&asoc->resetHead);
1222 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1223 	TAILQ_INIT(&asoc->asconf_queue);
1224 	/* authentication fields */
1225 	asoc->authinfo.random = NULL;
1226 	asoc->authinfo.active_keyid = 0;
1227 	asoc->authinfo.assoc_key = NULL;
1228 	asoc->authinfo.assoc_keyid = 0;
1229 	asoc->authinfo.recv_key = NULL;
1230 	asoc->authinfo.recv_keyid = 0;
1231 	LIST_INIT(&asoc->shared_keys);
1232 	asoc->marked_retrans = 0;
1233 	asoc->port = inp->sctp_ep.port;
1234 	asoc->timoinit = 0;
1235 	asoc->timodata = 0;
1236 	asoc->timosack = 0;
1237 	asoc->timoshutdown = 0;
1238 	asoc->timoheartbeat = 0;
1239 	asoc->timocookie = 0;
1240 	asoc->timoshutdownack = 0;
1241 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1242 	asoc->discontinuity_time = asoc->start_time;
1243 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1244 		asoc->abandoned_unsent[i] = 0;
1245 		asoc->abandoned_sent[i] = 0;
1246 	}
1247 	/*
1248 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1249 	 * freed later when the association is freed.
1250 	 */
1251 	return (0);
1252 }
1253 
1254 void
1255 sctp_print_mapping_array(struct sctp_association *asoc)
1256 {
1257 	unsigned int i, limit;
1258 
1259 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1260 	    asoc->mapping_array_size,
1261 	    asoc->mapping_array_base_tsn,
1262 	    asoc->cumulative_tsn,
1263 	    asoc->highest_tsn_inside_map,
1264 	    asoc->highest_tsn_inside_nr_map);
1265 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1266 		if (asoc->mapping_array[limit - 1] != 0) {
1267 			break;
1268 		}
1269 	}
1270 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1271 	for (i = 0; i < limit; i++) {
1272 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1273 	}
1274 	if (limit % 16)
1275 		SCTP_PRINTF("\n");
1276 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1277 		if (asoc->nr_mapping_array[limit - 1]) {
1278 			break;
1279 		}
1280 	}
1281 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1282 	for (i = 0; i < limit; i++) {
1283 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1284 	}
1285 	if (limit % 16)
1286 		SCTP_PRINTF("\n");
1287 }
1288 
1289 int
1290 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1291 {
1292 	/* mapping array needs to grow */
1293 	uint8_t *new_array1, *new_array2;
1294 	uint32_t new_size;
1295 
1296 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1297 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1298 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1299 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1300 		/* can't get more, forget it */
1301 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1302 		if (new_array1) {
1303 			SCTP_FREE(new_array1, SCTP_M_MAP);
1304 		}
1305 		if (new_array2) {
1306 			SCTP_FREE(new_array2, SCTP_M_MAP);
1307 		}
1308 		return (-1);
1309 	}
1310 	memset(new_array1, 0, new_size);
1311 	memset(new_array2, 0, new_size);
1312 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1313 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1314 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1315 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1316 	asoc->mapping_array = new_array1;
1317 	asoc->nr_mapping_array = new_array2;
1318 	asoc->mapping_array_size = new_size;
1319 	return (0);
1320 }
1321 
1322 
/*
 * Run one queued iterator to completion: walk the endpoint list (or a
 * single endpoint for SCTP_ITERATOR_DO_SINGLE_INP), invoking the
 * caller-supplied per-endpoint and per-association callbacks on every
 * inp/stcb that matches the iterator's flag/feature/state filters.
 * Frees 'it' when done.  Called with neither the INP-info nor iterator
 * lock held; acquires and releases both here.  Periodically drops all
 * locks (every SCTP_ITERATOR_MAX_AT_ONCE associations) so other threads
 * can make progress, honoring any stop requests posted via
 * sctp_it_ctl.iterator_flags while the locks were released.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	sctp_it_ctl.cur_it = it;
	if (it->inp) {
		/* Drop the reference taken when the iterator was queued. */
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		sctp_it_ctl.cur_it = NULL;
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	/* On re-entry from no_stcb the inp is not yet locked. */
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		/* Advance to the next endpoint before releasing this one. */
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Endpoint callback asked to skip, or no assocs here. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Hold the stcb via refcnt and the inp via an extra
			 * ref so neither can be freed while all locks are
			 * dropped and reacquired in order.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}

		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1473 
1474 void
1475 sctp_iterator_worker(void)
1476 {
1477 	struct sctp_iterator *it, *nit;
1478 
1479 	/* This function is called with the WQ lock in place */
1480 
1481 	sctp_it_ctl.iterator_running = 1;
1482 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1483 		/* now lets work on this one */
1484 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1485 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1486 		CURVNET_SET(it->vn);
1487 		sctp_iterator_work(it);
1488 		CURVNET_RESTORE();
1489 		SCTP_IPI_ITERATOR_WQ_LOCK();
1490 		/* sa_ignore FREED_MEMORY */
1491 	}
1492 	sctp_it_ctl.iterator_running = 0;
1493 	return;
1494 }
1495 
1496 
/*
 * Process the global address work queue filled by routing-socket address
 * change events: move all pending sctp_laddr entries onto a freshly
 * allocated asconf iterator and kick off an iterator over all bound-all
 * endpoints so each association can emit the appropriate ASCONFs.
 * NOTE(review): appears to assume the caller holds the WQ address lock
 * protecting SCTP_BASE_INFO(addr_wq) — confirm against callers.
 */
static void
sctp_handle_addr_wq(void)
{
	/* deal with the ADDR wq from the rtsock calls */
	struct sctp_laddr *wi, *nwi;
	struct sctp_asconf_iterator *asc;

	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
	if (asc == NULL) {
		/* Try later, no memory */
		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
		    (struct sctp_inpcb *)NULL,
		    (struct sctp_tcb *)NULL,
		    (struct sctp_nets *)NULL);
		return;
	}
	LIST_INIT(&asc->list_of_work);
	asc->cnt = 0;

	/* Transfer every queued address event onto the iterator's list. */
	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
		LIST_REMOVE(wi, sctp_nxt_addr);
		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
		asc->cnt++;
	}

	if (asc->cnt == 0) {
		/* Nothing was queued after all; no iterator needed. */
		SCTP_FREE(asc, SCTP_M_ASC_IT);
	} else {
		int ret;

		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
		    sctp_asconf_iterator_stcb,
		    NULL,	/* No ep end for boundall */
		    SCTP_PCB_FLAGS_BOUNDALL,
		    SCTP_PCB_ANY_FEATURES,
		    SCTP_ASOC_ANY_STATE,
		    (void *)asc, 0,
		    sctp_asconf_iterator_end, NULL, 0);
		if (ret) {
			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
			/*
			 * Freeing if we are stopping or put back on the
			 * addr_wq.
			 */
			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
				/* Stack is shutting down: free the work items. */
				sctp_asconf_iterator_end(asc, 0);
			} else {
				/* Re-queue the work for a later attempt. */
				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
				}
				SCTP_FREE(asc, SCTP_M_ASC_IT);
			}
		}
	}
}
1553 
/*
 * Common callout(9) handler for every SCTP timer type.  't' points at a
 * struct sctp_timer embedded in the owning inp/stcb/net; its ep/tcb/net
 * fields identify the objects the timer applies to (any of which may be
 * NULL depending on the timer type).
 *
 * The function first validates the timer and takes references on the inp
 * and stcb so they cannot disappear, then acquires the appropriate lock
 * (TCB lock, INP write lock, or WQ-address lock), re-checks that the
 * callout was not rescheduled or stopped meanwhile, and finally
 * dispatches on the timer type.  The goto labels encode the teardown
 * order: get_out releases the lock taken above, out_decr additionally
 * drops the inp reference, and out_no_decr is used on paths where the
 * reference was already consumed (ASOCKILL/INPKILL free their object).
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif
	int did_output;
	int type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t)tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	/* stopped_from breadcrumbs record how far this handler got. */
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Only the address-WQ timer may run without an endpoint. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * Once the socket is gone, only the timer types listed
		 * below are still meaningful; drop anything else.
		 */
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the stcb before we try to take its lock below. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			/* Association already gone. */
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* Cache the type: tmr may be restarted/cleared once unlocked. */
	type = tmr->type;
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* Callout was stopped in the meantime; undo refs and bail. */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	/*
	 * Take the lock matching the timer's scope: TCB lock when we have
	 * an association, INP write lock for endpoint-level timers (except
	 * INPKILL, which handles the inp itself), otherwise the WQ-address
	 * lock for the ADDR_WQ timer.
	 */
	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	} else if (inp != NULL) {
		if (type != SCTP_TIMER_TYPE_INPKILL) {
			SCTP_INP_WLOCK(inp);
		}
	} else {
		SCTP_WQ_ADDR_LOCK();
	}
	/* record in stopped what t-o occurred */
	tmr->stopped_from = type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (type) {
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * Safeguard: if chunks remain on the sent queue but
			 * no send timers are running, something is wrong...
			 * so we start a timer on the first chunk on the
			 * send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/* Heartbeat is per-destination: needs a valid net. */
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/* COOKIE-ECHO retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}

		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* Periodic rotation of the endpoint's cookie secret. */
		{
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			/* Re-arm for the next rotation. */
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* Guard expired: abort the association outright. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Shutdown guard timer expired");
		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* Deferred association teardown. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		/* Deferred endpoint teardown. */
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t)type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	/* Release whichever lock was taken above. */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	} else if (inp != NULL) {
		SCTP_INP_WUNLOCK(inp);
	} else {
		SCTP_WQ_ADDR_UNLOCK();
	}

out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}

out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
	CURVNET_RESTORE();
}
1991 
1992 void
1993 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1994     struct sctp_nets *net)
1995 {
1996 	uint32_t to_ticks;
1997 	struct sctp_timer *tmr;
1998 
1999 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
2000 		return;
2001 
2002 	tmr = NULL;
2003 	if (stcb) {
2004 		SCTP_TCB_LOCK_ASSERT(stcb);
2005 	}
2006 	switch (t_type) {
2007 	case SCTP_TIMER_TYPE_ADDR_WQ:
2008 		/* Only 1 tick away :-) */
2009 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2010 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
2011 		break;
2012 	case SCTP_TIMER_TYPE_SEND:
2013 		/* Here we use the RTO timer */
2014 		{
2015 			int rto_val;
2016 
2017 			if ((stcb == NULL) || (net == NULL)) {
2018 				return;
2019 			}
2020 			tmr = &net->rxt_timer;
2021 			if (net->RTO == 0) {
2022 				rto_val = stcb->asoc.initial_rto;
2023 			} else {
2024 				rto_val = net->RTO;
2025 			}
2026 			to_ticks = MSEC_TO_TICKS(rto_val);
2027 		}
2028 		break;
2029 	case SCTP_TIMER_TYPE_INIT:
2030 		/*
2031 		 * Here we use the INIT timer default usually about 1
2032 		 * minute.
2033 		 */
2034 		if ((stcb == NULL) || (net == NULL)) {
2035 			return;
2036 		}
2037 		tmr = &net->rxt_timer;
2038 		if (net->RTO == 0) {
2039 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2040 		} else {
2041 			to_ticks = MSEC_TO_TICKS(net->RTO);
2042 		}
2043 		break;
2044 	case SCTP_TIMER_TYPE_RECV:
2045 		/*
2046 		 * Here we use the Delayed-Ack timer value from the inp
2047 		 * ususually about 200ms.
2048 		 */
2049 		if (stcb == NULL) {
2050 			return;
2051 		}
2052 		tmr = &stcb->asoc.dack_timer;
2053 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2054 		break;
2055 	case SCTP_TIMER_TYPE_SHUTDOWN:
2056 		/* Here we use the RTO of the destination. */
2057 		if ((stcb == NULL) || (net == NULL)) {
2058 			return;
2059 		}
2060 		if (net->RTO == 0) {
2061 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2062 		} else {
2063 			to_ticks = MSEC_TO_TICKS(net->RTO);
2064 		}
2065 		tmr = &net->rxt_timer;
2066 		break;
2067 	case SCTP_TIMER_TYPE_HEARTBEAT:
2068 		/*
2069 		 * the net is used here so that we can add in the RTO. Even
2070 		 * though we use a different timer. We also add the HB timer
2071 		 * PLUS a random jitter.
2072 		 */
2073 		if ((stcb == NULL) || (net == NULL)) {
2074 			return;
2075 		} else {
2076 			uint32_t rndval;
2077 			uint32_t jitter;
2078 
2079 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2080 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2081 				return;
2082 			}
2083 			if (net->RTO == 0) {
2084 				to_ticks = stcb->asoc.initial_rto;
2085 			} else {
2086 				to_ticks = net->RTO;
2087 			}
2088 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2089 			jitter = rndval % to_ticks;
2090 			if (jitter >= (to_ticks >> 1)) {
2091 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2092 			} else {
2093 				to_ticks = to_ticks - jitter;
2094 			}
2095 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2096 			    !(net->dest_state & SCTP_ADDR_PF)) {
2097 				to_ticks += net->heart_beat_delay;
2098 			}
2099 			/*
2100 			 * Now we must convert the to_ticks that are now in
2101 			 * ms to ticks.
2102 			 */
2103 			to_ticks = MSEC_TO_TICKS(to_ticks);
2104 			tmr = &net->hb_timer;
2105 		}
2106 		break;
2107 	case SCTP_TIMER_TYPE_COOKIE:
2108 		/*
2109 		 * Here we can use the RTO timer from the network since one
2110 		 * RTT was compelete. If a retran happened then we will be
2111 		 * using the RTO initial value.
2112 		 */
2113 		if ((stcb == NULL) || (net == NULL)) {
2114 			return;
2115 		}
2116 		if (net->RTO == 0) {
2117 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2118 		} else {
2119 			to_ticks = MSEC_TO_TICKS(net->RTO);
2120 		}
2121 		tmr = &net->rxt_timer;
2122 		break;
2123 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2124 		/*
2125 		 * nothing needed but the endpoint here ususually about 60
2126 		 * minutes.
2127 		 */
2128 		tmr = &inp->sctp_ep.signature_change;
2129 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2130 		break;
2131 	case SCTP_TIMER_TYPE_ASOCKILL:
2132 		if (stcb == NULL) {
2133 			return;
2134 		}
2135 		tmr = &stcb->asoc.strreset_timer;
2136 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2137 		break;
2138 	case SCTP_TIMER_TYPE_INPKILL:
2139 		/*
2140 		 * The inp is setup to die. We re-use the signature_chage
2141 		 * timer since that has stopped and we are in the GONE
2142 		 * state.
2143 		 */
2144 		tmr = &inp->sctp_ep.signature_change;
2145 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2146 		break;
2147 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2148 		/*
2149 		 * Here we use the value found in the EP for PMTU ususually
2150 		 * about 10 minutes.
2151 		 */
2152 		if ((stcb == NULL) || (net == NULL)) {
2153 			return;
2154 		}
2155 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2156 			return;
2157 		}
2158 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2159 		tmr = &net->pmtu_timer;
2160 		break;
2161 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2162 		/* Here we use the RTO of the destination */
2163 		if ((stcb == NULL) || (net == NULL)) {
2164 			return;
2165 		}
2166 		if (net->RTO == 0) {
2167 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2168 		} else {
2169 			to_ticks = MSEC_TO_TICKS(net->RTO);
2170 		}
2171 		tmr = &net->rxt_timer;
2172 		break;
2173 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2174 		/*
2175 		 * Here we use the endpoints shutdown guard timer usually
2176 		 * about 3 minutes.
2177 		 */
2178 		if (stcb == NULL) {
2179 			return;
2180 		}
2181 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2182 			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2183 		} else {
2184 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2185 		}
2186 		tmr = &stcb->asoc.shut_guard_timer;
2187 		break;
2188 	case SCTP_TIMER_TYPE_STRRESET:
2189 		/*
2190 		 * Here the timer comes from the stcb but its value is from
2191 		 * the net's RTO.
2192 		 */
2193 		if ((stcb == NULL) || (net == NULL)) {
2194 			return;
2195 		}
2196 		if (net->RTO == 0) {
2197 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2198 		} else {
2199 			to_ticks = MSEC_TO_TICKS(net->RTO);
2200 		}
2201 		tmr = &stcb->asoc.strreset_timer;
2202 		break;
2203 	case SCTP_TIMER_TYPE_ASCONF:
2204 		/*
2205 		 * Here the timer comes from the stcb but its value is from
2206 		 * the net's RTO.
2207 		 */
2208 		if ((stcb == NULL) || (net == NULL)) {
2209 			return;
2210 		}
2211 		if (net->RTO == 0) {
2212 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2213 		} else {
2214 			to_ticks = MSEC_TO_TICKS(net->RTO);
2215 		}
2216 		tmr = &stcb->asoc.asconf_timer;
2217 		break;
2218 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2219 		if ((stcb == NULL) || (net != NULL)) {
2220 			return;
2221 		}
2222 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2223 		tmr = &stcb->asoc.delete_prim_timer;
2224 		break;
2225 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2226 		if (stcb == NULL) {
2227 			return;
2228 		}
2229 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2230 			/*
2231 			 * Really an error since stcb is NOT set to
2232 			 * autoclose
2233 			 */
2234 			return;
2235 		}
2236 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2237 		tmr = &stcb->asoc.autoclose_timer;
2238 		break;
2239 	default:
2240 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2241 		    __func__, t_type);
2242 		return;
2243 		break;
2244 	}
2245 	if ((to_ticks <= 0) || (tmr == NULL)) {
2246 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2247 		    __func__, t_type, to_ticks, (void *)tmr);
2248 		return;
2249 	}
2250 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2251 		/*
2252 		 * we do NOT allow you to have it already running. if it is
2253 		 * we leave the current one up unchanged
2254 		 */
2255 		return;
2256 	}
2257 	/* At this point we can proceed */
2258 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2259 		stcb->asoc.num_send_timers_up++;
2260 	}
2261 	tmr->stopped_from = 0;
2262 	tmr->type = t_type;
2263 	tmr->ep = (void *)inp;
2264 	tmr->tcb = (void *)stcb;
2265 	tmr->net = (void *)net;
2266 	tmr->self = (void *)tmr;
2267 	tmr->vnet = (void *)curvnet;
2268 	tmr->ticks = sctp_get_tick_count();
2269 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2270 	return;
2271 }
2272 
/*
 * Stop the timer of the given type for the inp/stcb/net combination.
 * The type-to-timer mapping mirrors sctp_timer_start() above.  Several
 * timer types share a struct sctp_timer slot (e.g. SEND/INIT/COOKIE all
 * use net->rxt_timer); the tmr->type check below guards against stopping
 * a co-resident timer of a different type.  'from' records the caller's
 * location code for debugging.  The stcb lock must be held whenever an
 * stcb is supplied.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Only the address workqueue timer may run without an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Select the timer structure that holds this timer type. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer (shares the strreset slot).
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __func__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		/* Keep the per-association count of armed SEND timers sane. */
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	/* Clearing self tells a concurrently firing handler to bail out. */
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2423 
2424 uint32_t
2425 sctp_calculate_len(struct mbuf *m)
2426 {
2427 	uint32_t tlen = 0;
2428 	struct mbuf *at;
2429 
2430 	at = m;
2431 	while (at) {
2432 		tlen += SCTP_BUF_LEN(at);
2433 		at = SCTP_BUF_NEXT(at);
2434 	}
2435 	return (tlen);
2436 }
2437 
2438 void
2439 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2440     struct sctp_association *asoc, uint32_t mtu)
2441 {
2442 	/*
2443 	 * Reset the P-MTU size on this association, this involves changing
2444 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2445 	 * allow the DF flag to be cleared.
2446 	 */
2447 	struct sctp_tmit_chunk *chk;
2448 	unsigned int eff_mtu, ovh;
2449 
2450 	asoc->smallest_mtu = mtu;
2451 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2452 		ovh = SCTP_MIN_OVERHEAD;
2453 	} else {
2454 		ovh = SCTP_MIN_V4_OVERHEAD;
2455 	}
2456 	eff_mtu = mtu - ovh;
2457 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2458 		if (chk->send_size > eff_mtu) {
2459 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2460 		}
2461 	}
2462 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2463 		if (chk->send_size > eff_mtu) {
2464 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2465 		}
2466 	}
2467 }
2468 
2469 
2470 /*
2471  * given an association and starting time of the current RTT period return
2472  * RTO in number of msecs net should point to the current network
2473  */
2474 
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *old,
    int rtt_from_sack)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 * Side effects: updates net->rtt (in usec), net->lastsa/lastsv
	 * (scaled SRTT/RTTVAR), net->lan_type, and the association's
	 * sat_network flags.
	 */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;
	struct timeval now;

	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	/* now = now - old, i.e. the elapsed RTT period */
	timevalsub(&now, old);
	/* store the current RTT in us */
	net->rtt = (uint64_t)1000000 * (uint64_t)now.tv_sec +
	    (uint64_t)now.tv_usec;
	/* compute rtt in ms */
	rtt = (int32_t)(net->rtt / 1000);
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/*
		 * Tell the CC module that a new update has just occurred
		 * from a sack
		 */
		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
	}
	/*
	 * Do we need to determine the lan? We do this only on sacks i.e.
	 * RTT being determined from data not non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}

	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		/* Incremental EWMA update; 'rtt' becomes the error term. */
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurement: seed SRTT = rtt, RTTVAR = rtt/2. */
		net->RTO_measured = 1;
		first_measure = 1;
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	if (net->lastsv == 0) {
		/* Never let the variance collapse to zero. */
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	/* RTO = SRTT + RTTVAR (both in ms once descaled). */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		/* RTT dropped back below the satellite threshold: latch off. */
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2579 
2580 /*
2581  * return a pointer to a contiguous piece of data from the given mbuf chain
2582  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2583  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2584  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2585  */
2586 caddr_t
2587 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
2588 {
2589 	uint32_t count;
2590 	uint8_t *ptr;
2591 
2592 	ptr = in_ptr;
2593 	if ((off < 0) || (len <= 0))
2594 		return (NULL);
2595 
2596 	/* find the desired start location */
2597 	while ((m != NULL) && (off > 0)) {
2598 		if (off < SCTP_BUF_LEN(m))
2599 			break;
2600 		off -= SCTP_BUF_LEN(m);
2601 		m = SCTP_BUF_NEXT(m);
2602 	}
2603 	if (m == NULL)
2604 		return (NULL);
2605 
2606 	/* is the current mbuf large enough (eg. contiguous)? */
2607 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2608 		return (mtod(m, caddr_t)+off);
2609 	} else {
2610 		/* else, it spans more than one mbuf, so save a temp copy... */
2611 		while ((m != NULL) && (len > 0)) {
2612 			count = min(SCTP_BUF_LEN(m) - off, len);
2613 			memcpy(ptr, mtod(m, caddr_t)+off, count);
2614 			len -= count;
2615 			ptr += count;
2616 			off = 0;
2617 			m = SCTP_BUF_NEXT(m);
2618 		}
2619 		if ((m == NULL) && (len > 0))
2620 			return (NULL);
2621 		else
2622 			return ((caddr_t)in_ptr);
2623 	}
2624 }
2625 
2626 
2627 
2628 struct sctp_paramhdr *
2629 sctp_get_next_param(struct mbuf *m,
2630     int offset,
2631     struct sctp_paramhdr *pull,
2632     int pull_limit)
2633 {
2634 	/* This just provides a typed signature to Peter's Pull routine */
2635 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2636 	    (uint8_t *)pull));
2637 }
2638 
2639 
2640 struct mbuf *
2641 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2642 {
2643 	struct mbuf *m_last;
2644 	caddr_t dp;
2645 
2646 	if (padlen > 3) {
2647 		return (NULL);
2648 	}
2649 	if (padlen <= M_TRAILINGSPACE(m)) {
2650 		/*
2651 		 * The easy way. We hope the majority of the time we hit
2652 		 * here :)
2653 		 */
2654 		m_last = m;
2655 	} else {
2656 		/* Hard way we must grow the mbuf chain */
2657 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2658 		if (m_last == NULL) {
2659 			return (NULL);
2660 		}
2661 		SCTP_BUF_LEN(m_last) = 0;
2662 		SCTP_BUF_NEXT(m_last) = NULL;
2663 		SCTP_BUF_NEXT(m) = m_last;
2664 	}
2665 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2666 	SCTP_BUF_LEN(m_last) += padlen;
2667 	memset(dp, 0, padlen);
2668 	return (m_last);
2669 }
2670 
2671 struct mbuf *
2672 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2673 {
2674 	/* find the last mbuf in chain and pad it */
2675 	struct mbuf *m_at;
2676 
2677 	if (last_mbuf != NULL) {
2678 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2679 	} else {
2680 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2681 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2682 				return (sctp_add_pad_tombuf(m_at, padval));
2683 			}
2684 		}
2685 	}
2686 	return (NULL);
2687 }
2688 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification to the socket's receive
 * queue (if the user enabled SCTP_PCB_FLAGS_RECVASSOCEVNT), set the
 * socket error for 1-to-1 style sockets when the association is lost,
 * and wake any sleepers.  'abort' (may be NULL) is the received ABORT
 * chunk, included in the notification for COMM_LOST/CANT_STR_ASSOC.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t abort_len;
	unsigned int i;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	if (stcb == NULL) {
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
			/*
			 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
			 * contiguous.
			 */
			if (abort_len > SCTP_CHUNK_BUFFER_SIZE) {
				abort_len = SCTP_CHUNK_BUFFER_SIZE;
			}
		} else {
			abort_len = 0;
		}
		/* Reserve room for the feature list or the ABORT chunk. */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				/* Still no memory: skip the notification. */
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				/* Append one byte per negotiated feature. */
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				if (stcb->asoc.idata_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				/* Append the (possibly truncated) ABORT chunk. */
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			control->spec_flags = M_NOTIFICATION;
			/* not that we need this */
			control->tail_mbuf = m_notify;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up an error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
				/* Peer refused during setup. */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
				/* We gave up during setup. */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
		SOCK_UNLOCK(stcb->sctp_socket);
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Acquire the socket lock without holding the stcb lock; the
	 * refcount bump keeps the stcb alive across the unlock window.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
2849 
/*
 * Deliver an SCTP_PEER_ADDR_CHANGE notification for address 'sa' to the
 * socket's receive queue, if the user enabled
 * SCTP_PCB_FLAGS_RECVPADDREVNT.  'state' is the new address state and
 * 'error' the associated error code.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* best effort: drop the notification on allocation failure */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address, converting to the form the user expects. */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			/* User asked for v4-mapped v6 addresses. */
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2941 
2942 
/*
 * Queue a send-failed notification for the datagram carried in "chk" to the
 * association's socket.  Depending on which event the application enabled,
 * either the old-style SCTP_SEND_FAILED or the SCTP_SEND_FAILED_EVENT
 * layout (RFC 6458) is used.  On success the chunk's mbuf chain is stolen
 * (chk->data is set to NULL) and appended after the notification header.
 *
 * sent      - non-zero: data had been transmitted (SCTP_DATA_SENT);
 *             zero: it never left the queue (SCTP_DATA_UNSENT).
 * error     - error cause reported to the application.
 * so_locked - whether the caller already holds the socket lock (only
 *             meaningful on platforms doing socket-level locking).
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	struct sctp_chunkhdr *chkhdr;
	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}

	/* Pick the notification layout the application subscribed to. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		notifhdr_len = sizeof(struct sctp_send_failed_event);
	} else {
		notifhdr_len = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = notifhdr_len;
	/* I-DATA chunks carry a larger chunk header than plain DATA chunks. */
	if (stcb->asoc.idata_supported) {
		chkhdr_len = sizeof(struct sctp_idata_chunk);
	} else {
		chkhdr_len = sizeof(struct sctp_data_chunk);
	}
	/* Use some defaults in case we can't access the chunk header */
	if (chk->send_size >= chkhdr_len) {
		payload_len = chk->send_size - chkhdr_len;
	} else {
		payload_len = 0;
	}
	padding_len = 0;
	if (chk->data != NULL) {
		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
		if (chkhdr != NULL) {
			chk_len = ntohs(chkhdr->chunk_length);
			/*
			 * Trust the on-wire chunk length only if it is
			 * plausible: at least a full chunk header, no larger
			 * than send_size, and with less than 4 bytes of
			 * trailing padding.
			 */
			if ((chk_len >= chkhdr_len) &&
			    (chk->send_size >= chk_len) &&
			    (chk->send_size - chk_len < 4)) {
				padding_len = chk->send_size - chk_len;
				payload_len = chk->send_size - chkhdr_len - padding_len;
			}
		}
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		/* RFC 6458 extended event layout. */
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, notifhdr_len);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
	} else {
		/* Legacy (deprecated) event layout. */
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, notifhdr_len);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
	}
	if (chk->data != NULL) {
		/* Trim off the sctp chunk header (it should be there) */
		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
			m_adj(chk->data, chkhdr_len);
			m_adj(chk->data, -padding_len);
			sctp_mbuf_crush(chk->data);
			chk->send_size -= (chkhdr_len + padding_len);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* Freeing m_notify also frees the stolen data chain. */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3077 
3078 
3079 static void
3080 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3081     struct sctp_stream_queue_pending *sp, int so_locked
3082 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3083     SCTP_UNUSED
3084 #endif
3085 )
3086 {
3087 	struct mbuf *m_notify;
3088 	struct sctp_send_failed *ssf;
3089 	struct sctp_send_failed_event *ssfe;
3090 	struct sctp_queued_to_read *control;
3091 	int notifhdr_len;
3092 
3093 	if ((stcb == NULL) ||
3094 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3095 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3096 		/* event not enabled */
3097 		return;
3098 	}
3099 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3100 		notifhdr_len = sizeof(struct sctp_send_failed_event);
3101 	} else {
3102 		notifhdr_len = sizeof(struct sctp_send_failed);
3103 	}
3104 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3105 	if (m_notify == NULL) {
3106 		/* no space left */
3107 		return;
3108 	}
3109 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3110 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3111 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3112 		memset(ssfe, 0, notifhdr_len);
3113 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3114 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3115 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
3116 		ssfe->ssfe_error = error;
3117 		/* not exactly what the user sent in, but should be close :) */
3118 		ssfe->ssfe_info.snd_sid = sp->sid;
3119 		if (sp->some_taken) {
3120 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3121 		} else {
3122 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3123 		}
3124 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3125 		ssfe->ssfe_info.snd_context = sp->context;
3126 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3127 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3128 	} else {
3129 		ssf = mtod(m_notify, struct sctp_send_failed *);
3130 		memset(ssf, 0, notifhdr_len);
3131 		ssf->ssf_type = SCTP_SEND_FAILED;
3132 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3133 		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
3134 		ssf->ssf_error = error;
3135 		/* not exactly what the user sent in, but should be close :) */
3136 		ssf->ssf_info.sinfo_stream = sp->sid;
3137 		ssf->ssf_info.sinfo_ssn = 0;
3138 		if (sp->some_taken) {
3139 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3140 		} else {
3141 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3142 		}
3143 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3144 		ssf->ssf_info.sinfo_context = sp->context;
3145 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3146 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3147 	}
3148 	SCTP_BUF_NEXT(m_notify) = sp->data;
3149 
3150 	/* Steal off the mbuf */
3151 	sp->data = NULL;
3152 	/*
3153 	 * For this case, we check the actual socket buffer, since the assoc
3154 	 * is going away we don't want to overfill the socket buffer for a
3155 	 * non-reader
3156 	 */
3157 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3158 		sctp_m_freem(m_notify);
3159 		return;
3160 	}
3161 	/* append to socket */
3162 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3163 	    0, 0, stcb->asoc.context, 0, 0, 0,
3164 	    m_notify);
3165 	if (control == NULL) {
3166 		/* no memory */
3167 		sctp_m_freem(m_notify);
3168 		return;
3169 	}
3170 	control->length = SCTP_BUF_LEN(m_notify);
3171 	control->spec_flags = M_NOTIFICATION;
3172 	/* not that we need this */
3173 	control->tail_mbuf = m_notify;
3174 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3175 	    control,
3176 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3177 }
3178 
3179 
3180 
3181 static void
3182 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3183 {
3184 	struct mbuf *m_notify;
3185 	struct sctp_adaptation_event *sai;
3186 	struct sctp_queued_to_read *control;
3187 
3188 	if ((stcb == NULL) ||
3189 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3190 		/* event not enabled */
3191 		return;
3192 	}
3193 
3194 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3195 	if (m_notify == NULL)
3196 		/* no space left */
3197 		return;
3198 	SCTP_BUF_LEN(m_notify) = 0;
3199 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3200 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3201 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3202 	sai->sai_flags = 0;
3203 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3204 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3205 	sai->sai_assoc_id = sctp_get_associd(stcb);
3206 
3207 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3208 	SCTP_BUF_NEXT(m_notify) = NULL;
3209 
3210 	/* append to socket */
3211 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3212 	    0, 0, stcb->asoc.context, 0, 0, 0,
3213 	    m_notify);
3214 	if (control == NULL) {
3215 		/* no memory */
3216 		sctp_m_freem(m_notify);
3217 		return;
3218 	}
3219 	control->length = SCTP_BUF_LEN(m_notify);
3220 	control->spec_flags = M_NOTIFICATION;
3221 	/* not that we need this */
3222 	control->tail_mbuf = m_notify;
3223 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3224 	    control,
3225 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3226 }
3227 
/*
 * Queue an SCTP_PARTIAL_DELIVERY_EVENT notification (e.g. an aborted
 * partial delivery) directly onto the endpoint's read queue.
 *
 * This always must be called with the read-queue LOCKED in the INP, since
 * the entry is inserted by hand (immediately after any in-progress pdapi
 * entry) instead of going through sctp_add_to_readq().
 *
 * "val" packs the stream number in the upper 16 bits and the sequence
 * number in the lower 16 bits.
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* Nobody can read from this socket anymore. */
		return;
	}

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sb = &stcb->sctp_socket->so_rcv;
	/* Account for the notification in the socket buffer (with optional logging). */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	control->end_added = 1;
	/* Insert right behind the partial-delivery entry being reported on. */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Lock-order dance: hold a reference, drop the TCB
			 * lock, take the socket lock, re-take the TCB lock,
			 * then re-check that the socket did not go away.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3321 
/*
 * Notify the application that the peer has sent a SHUTDOWN.  For TCP-model
 * (and connected UDP-model) sockets the socket is first marked as no
 * longer writable; then, if subscribed, an SCTP_SHUTDOWN_EVENT
 * notification is queued to the receive buffer.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		/*
		 * Lock-order dance: hold a reference, drop the TCB lock,
		 * take the socket lock, re-take the TCB lock, then re-check
		 * that the socket was not closed in the meantime.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3391 
3392 static void
3393 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3394     int so_locked
3395 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3396     SCTP_UNUSED
3397 #endif
3398 )
3399 {
3400 	struct mbuf *m_notify;
3401 	struct sctp_sender_dry_event *event;
3402 	struct sctp_queued_to_read *control;
3403 
3404 	if ((stcb == NULL) ||
3405 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3406 		/* event not enabled */
3407 		return;
3408 	}
3409 
3410 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3411 	if (m_notify == NULL) {
3412 		/* no space left */
3413 		return;
3414 	}
3415 	SCTP_BUF_LEN(m_notify) = 0;
3416 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3417 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3418 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3419 	event->sender_dry_flags = 0;
3420 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3421 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3422 
3423 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3424 	SCTP_BUF_NEXT(m_notify) = NULL;
3425 
3426 	/* append to socket */
3427 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3428 	    0, 0, stcb->asoc.context, 0, 0, 0,
3429 	    m_notify);
3430 	if (control == NULL) {
3431 		/* no memory */
3432 		sctp_m_freem(m_notify);
3433 		return;
3434 	}
3435 	control->length = SCTP_BUF_LEN(m_notify);
3436 	control->spec_flags = M_NOTIFICATION;
3437 	/* not that we need this */
3438 	control->tail_mbuf = m_notify;
3439 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3440 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3441 }
3442 
3443 
3444 void
3445 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3446 {
3447 	struct mbuf *m_notify;
3448 	struct sctp_queued_to_read *control;
3449 	struct sctp_stream_change_event *stradd;
3450 
3451 	if ((stcb == NULL) ||
3452 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3453 		/* event not enabled */
3454 		return;
3455 	}
3456 	if ((stcb->asoc.peer_req_out) && flag) {
3457 		/* Peer made the request, don't tell the local user */
3458 		stcb->asoc.peer_req_out = 0;
3459 		return;
3460 	}
3461 	stcb->asoc.peer_req_out = 0;
3462 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3463 	if (m_notify == NULL)
3464 		/* no space left */
3465 		return;
3466 	SCTP_BUF_LEN(m_notify) = 0;
3467 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3468 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3469 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3470 	stradd->strchange_flags = flag;
3471 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3472 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3473 	stradd->strchange_instrms = numberin;
3474 	stradd->strchange_outstrms = numberout;
3475 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3476 	SCTP_BUF_NEXT(m_notify) = NULL;
3477 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3478 		/* no space */
3479 		sctp_m_freem(m_notify);
3480 		return;
3481 	}
3482 	/* append to socket */
3483 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3484 	    0, 0, stcb->asoc.context, 0, 0, 0,
3485 	    m_notify);
3486 	if (control == NULL) {
3487 		/* no memory */
3488 		sctp_m_freem(m_notify);
3489 		return;
3490 	}
3491 	control->length = SCTP_BUF_LEN(m_notify);
3492 	control->spec_flags = M_NOTIFICATION;
3493 	/* not that we need this */
3494 	control->tail_mbuf = m_notify;
3495 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3496 	    control,
3497 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3498 }
3499 
3500 void
3501 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3502 {
3503 	struct mbuf *m_notify;
3504 	struct sctp_queued_to_read *control;
3505 	struct sctp_assoc_reset_event *strasoc;
3506 
3507 	if ((stcb == NULL) ||
3508 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3509 		/* event not enabled */
3510 		return;
3511 	}
3512 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3513 	if (m_notify == NULL)
3514 		/* no space left */
3515 		return;
3516 	SCTP_BUF_LEN(m_notify) = 0;
3517 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3518 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3519 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3520 	strasoc->assocreset_flags = flag;
3521 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3522 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3523 	strasoc->assocreset_local_tsn = sending_tsn;
3524 	strasoc->assocreset_remote_tsn = recv_tsn;
3525 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3526 	SCTP_BUF_NEXT(m_notify) = NULL;
3527 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3528 		/* no space */
3529 		sctp_m_freem(m_notify);
3530 		return;
3531 	}
3532 	/* append to socket */
3533 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3534 	    0, 0, stcb->asoc.context, 0, 0, 0,
3535 	    m_notify);
3536 	if (control == NULL) {
3537 		/* no memory */
3538 		sctp_m_freem(m_notify);
3539 		return;
3540 	}
3541 	control->length = SCTP_BUF_LEN(m_notify);
3542 	control->spec_flags = M_NOTIFICATION;
3543 	/* not that we need this */
3544 	control->tail_mbuf = m_notify;
3545 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3546 	    control,
3547 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3548 }
3549 
3550 
3551 
3552 static void
3553 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3554     int number_entries, uint16_t *list, int flag)
3555 {
3556 	struct mbuf *m_notify;
3557 	struct sctp_queued_to_read *control;
3558 	struct sctp_stream_reset_event *strreset;
3559 	int len;
3560 
3561 	if ((stcb == NULL) ||
3562 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3563 		/* event not enabled */
3564 		return;
3565 	}
3566 
3567 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3568 	if (m_notify == NULL)
3569 		/* no space left */
3570 		return;
3571 	SCTP_BUF_LEN(m_notify) = 0;
3572 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3573 	if (len > M_TRAILINGSPACE(m_notify)) {
3574 		/* never enough room */
3575 		sctp_m_freem(m_notify);
3576 		return;
3577 	}
3578 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3579 	memset(strreset, 0, len);
3580 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3581 	strreset->strreset_flags = flag;
3582 	strreset->strreset_length = len;
3583 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3584 	if (number_entries) {
3585 		int i;
3586 
3587 		for (i = 0; i < number_entries; i++) {
3588 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3589 		}
3590 	}
3591 	SCTP_BUF_LEN(m_notify) = len;
3592 	SCTP_BUF_NEXT(m_notify) = NULL;
3593 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3594 		/* no space */
3595 		sctp_m_freem(m_notify);
3596 		return;
3597 	}
3598 	/* append to socket */
3599 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3600 	    0, 0, stcb->asoc.context, 0, 0, 0,
3601 	    m_notify);
3602 	if (control == NULL) {
3603 		/* no memory */
3604 		sctp_m_freem(m_notify);
3605 		return;
3606 	}
3607 	control->length = SCTP_BUF_LEN(m_notify);
3608 	control->spec_flags = M_NOTIFICATION;
3609 	/* not that we need this */
3610 	control->tail_mbuf = m_notify;
3611 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3612 	    control,
3613 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3614 }
3615 
3616 
/*
 * Queue an SCTP_REMOTE_ERROR notification for an ERROR chunk received
 * from the peer.  Up to SCTP_CHUNK_BUFFER_SIZE bytes of the offending
 * chunk are copied into the notification; if the large allocation fails,
 * the notification is retried without the chunk payload.
 */
static void
sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
{
	struct mbuf *m_notify;
	struct sctp_remote_error *sre;
	struct sctp_queued_to_read *control;
	unsigned int notif_len;
	uint16_t chunk_len;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
		/* event not enabled */
		return;
	}
	if (chunk != NULL) {
		chunk_len = ntohs(chunk->ch.chunk_length);
		/*
		 * Only SCTP_CHUNK_BUFFER_SIZE are guaranteed to be
		 * contiguous.
		 */
		if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) {
			chunk_len = SCTP_CHUNK_BUFFER_SIZE;
		}
	} else {
		chunk_len = 0;
	}
	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* Retry with smaller value. */
		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Out of mbufs; drop the notification entirely. */
			return;
		}
	}
	SCTP_BUF_NEXT(m_notify) = NULL;
	sre = mtod(m_notify, struct sctp_remote_error *);
	memset(sre, 0, notif_len);
	sre->sre_type = SCTP_REMOTE_ERROR;
	sre->sre_flags = 0;
	sre->sre_length = sizeof(struct sctp_remote_error);
	sre->sre_error = error;
	sre->sre_assoc_id = sctp_get_associd(stcb);
	/* Append the chunk only if the full-size allocation succeeded. */
	if (notif_len > sizeof(struct sctp_remote_error)) {
		memcpy(sre->sre_data, chunk, chunk_len);
		sre->sre_length += chunk_len;
	}
	SCTP_BUF_LEN(m_notify) = sre->sre_length;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control != NULL) {
		control->length = SCTP_BUF_LEN(m_notify);
		control->spec_flags = M_NOTIFICATION;
		/* not that we need this */
		control->tail_mbuf = m_notify;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
	} else {
		/* No memory for the read-queue entry. */
		sctp_m_freem(m_notify);
	}
}
3681 
3682 
/*
 * Central dispatcher for upper-layer (ULP) notifications.  Translates an
 * internal SCTP_NOTIFY_* code into the corresponding socket-API event and
 * hands it to the matching sctp_notify_* helper.  The interpretation of
 * "data" depends on the notification (a net, a chunk, a pending message,
 * a stream list, a key number, ...).  Notifications are suppressed when
 * the socket is gone/closed, can no longer receive, or - for interface
 * events - while the association is still in a front (handshake) state.
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		/* Receiving side has been shut down; nobody to notify. */
		return;
	}
	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* Only ever report COMM_UP once per association. */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		/* Message failed while still on a stream queue. */
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;

			val = *((uint32_t *)data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		/* Handshake states map to CANT_STR_ASSOC, otherwise COMM_LOST. */
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	/* For the stream-reset cases "error" carries the entry count. */
	case SCTP_NOTIFY_STR_RESET_SEND:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	/* For the auth cases "data" carries the key number. */
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t)(uintptr_t)data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __func__, notification, notification);
		break;
	}			/* end switch */
}
3862 
/*
 * Report every outbound message still queued on the association as failed.
 * Walks the sent queue, the send queue, and each stream's output queue,
 * issuing SCTP_NOTIFY_SENT_DG_FAIL / SCTP_NOTIFY_UNSENT_DG_FAIL /
 * SCTP_NOTIFY_SPECIAL_SP_FAIL notifications with the supplied error code
 * and freeing the queued mbufs and chunk bookkeeping.  No-op when the
 * association is already being freed or the socket is gone.  holds_lock
 * tells us whether the caller already holds the TCB send lock.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket is gone; nobody left to notify */
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			/* keep the per-stream queued-chunk count in sync */
			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* the datagram had been transmitted: SENT failure */
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* the datagram never made it out: UNSENT failure */
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			/* detach the pending send from the stream scheduler */
			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, holds_lock);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				/* re-check: the notify may have consumed the data */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3974 
/*
 * Notify the ULP that this association was aborted, either by the peer
 * (from_peer != 0 -> SCTP_NOTIFY_ASSOC_REM_ABORTED) or locally
 * (SCTP_NOTIFY_ASSOC_LOC_ABORTED).  All still-queued outbound data is
 * failed first via sctp_report_all_outbound().  For one-to-one style
 * endpoints (TCP pool, or TCP-type and connected) the WAS_ABORTED flag is
 * recorded on the endpoint even when the socket itself is already gone.
 */
void
sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
    struct sctp_abort_chunk *abort, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if (stcb == NULL) {
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket is gone; nobody left to notify */
		return;
	}
	/* Tell them we lost the asoc */
	sctp_report_all_outbound(stcb, error, 1, so_locked);
	if (from_peer) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
	} else {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
	}
}
4004 
/*
 * Abort in response to an inbound packet: send an ABORT back to the sender
 * (using the peer's vtag when a TCB exists, otherwise vtag 0) and, if a TCB
 * is present, notify the ULP, bump the abort statistics, and free the
 * association.  Called with the TCB lock held when stcb != NULL; on Apple /
 * lock-testing builds the socket lock is acquired around the free.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB; use its verification tag and VRF. */
		vtag = stcb->asoc.peer_vtag;
		vrf_id = stcb->asoc.vrf_id;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    mflowtype, mflowid, inp->fibnum,
	    vrf_id, port);
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Hold a refcnt across the unlock/relock dance so the TCB
		 * cannot be freed while we take the socket lock.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* An established association goes away: adjust the gauge. */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
4051 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the association's inbound and outbound TSN logs
 * (circular buffers of SCTP_TSN_LOG_SIZE entries; the *_wrapped flags
 * indicate the buffer has cycled, so the tail is printed first).
 *
 * NOTE(review): the guard macro "NOSIY_PRINTS" looks like a typo of
 * "NOISY_PRINTS" — the body is compiled out unless that exact (misspelled)
 * macro is defined.  Verify no build defines it before renaming.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* wrapped: print the older half first (tsn_in_at..end) */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		/* then the newer entries from the start of the buffer */
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
4112 #endif
4113 
/*
 * Locally abort an existing association: send an ABORT chunk (with the
 * supplied operational error, which is consumed), notify the ULP unless
 * the socket is gone, and free the association.  When called without a
 * TCB, only performs deferred inpcb cleanup if the socket is gone and no
 * associations remain.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* An established association goes away: adjust the gauge. */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Hold a refcnt across the unlock/relock dance so the TCB cannot
	 * disappear while we take the socket lock.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4174 
/*
 * Handle an "out of the blue" packet (one that matches no association).
 * Scans the chunks in the packet to decide the response: no reply at all
 * for PACKET-DROPPED, ABORT or SHUTDOWN-COMPLETE chunks; a
 * SHUTDOWN-COMPLETE reply for a SHUTDOWN-ACK; otherwise an ABORT is sent,
 * subject to the sctp_blackhole sysctl (1 suppresses the ABORT only for
 * packets containing an INIT, any other non-zero value suppresses it
 * entirely).
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    struct mbuf *cause,
    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		/* deferred inpcb cleanup once the last association is gone */
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *)&chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			contains_init_chunk = 1;
			break;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(src, dst, sh,
			    mflowtype, mflowid, fibnum,
			    vrf_id, port);
			return;
		default:
			break;
		}
		/* advance to the next chunk (lengths are 4-byte padded) */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *)&chunk_buf);
	}
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	    (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
		    mflowtype, mflowid, fibnum,
		    vrf_id, port);
	}
}
4240 
4241 /*
4242  * check the inbound datagram to make sure there is not an abort inside it,
4243  * if there is return 1, else return 0.
4244  */
4245 int
4246 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
4247 {
4248 	struct sctp_chunkhdr *ch;
4249 	struct sctp_init_chunk *init_chk, chunk_buf;
4250 	int offset;
4251 	unsigned int chk_length;
4252 
4253 	offset = iphlen + sizeof(struct sctphdr);
4254 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4255 	    (uint8_t *)&chunk_buf);
4256 	while (ch != NULL) {
4257 		chk_length = ntohs(ch->chunk_length);
4258 		if (chk_length < sizeof(*ch)) {
4259 			/* packet is probably corrupt */
4260 			break;
4261 		}
4262 		/* we seem to be ok, is it an abort? */
4263 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4264 			/* yep, tell them */
4265 			return (1);
4266 		}
4267 		if (ch->chunk_type == SCTP_INITIATION) {
4268 			/* need to update the Vtag */
4269 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4270 			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
4271 			if (init_chk != NULL) {
4272 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4273 			}
4274 		}
4275 		/* Nope, move to the next chunk */
4276 		offset += SCTP_SIZE32(chk_length);
4277 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4278 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4279 	}
4280 	return (0);
4281 }
4282 
4283 /*
4284  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4285  * set (i.e. it's 0) so, create this function to compare link local scopes
4286  */
4287 #ifdef INET6
4288 uint32_t
4289 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4290 {
4291 	struct sockaddr_in6 a, b;
4292 
4293 	/* save copies */
4294 	a = *addr1;
4295 	b = *addr2;
4296 
4297 	if (a.sin6_scope_id == 0)
4298 		if (sa6_recoverscope(&a)) {
4299 			/* can't get scope, so can't match */
4300 			return (0);
4301 		}
4302 	if (b.sin6_scope_id == 0)
4303 		if (sa6_recoverscope(&b)) {
4304 			/* can't get scope, so can't match */
4305 			return (0);
4306 		}
4307 	if (a.sin6_scope_id != b.sin6_scope_id)
4308 		return (0);
4309 
4310 	return (1);
4311 }
4312 
4313 /*
4314  * returns a sockaddr_in6 with embedded scope recovered and removed
4315  */
4316 struct sockaddr_in6 *
4317 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4318 {
4319 	/* check and strip embedded scope junk */
4320 	if (addr->sin6_family == AF_INET6) {
4321 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4322 			if (addr->sin6_scope_id == 0) {
4323 				*store = *addr;
4324 				if (!sa6_recoverscope(store)) {
4325 					/* use the recovered scope */
4326 					addr = store;
4327 				}
4328 			} else {
4329 				/* else, return the original "to" addr */
4330 				in6_clearscope(&addr->sin6_addr);
4331 			}
4332 		}
4333 	}
4334 	return (addr);
4335 }
4336 #endif
4337 
4338 /*
4339  * are the two addresses the same?  currently a "scopeless" check returns: 1
4340  * if same, 0 if not
4341  */
4342 int
4343 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4344 {
4345 
4346 	/* must be valid */
4347 	if (sa1 == NULL || sa2 == NULL)
4348 		return (0);
4349 
4350 	/* must be the same family */
4351 	if (sa1->sa_family != sa2->sa_family)
4352 		return (0);
4353 
4354 	switch (sa1->sa_family) {
4355 #ifdef INET6
4356 	case AF_INET6:
4357 		{
4358 			/* IPv6 addresses */
4359 			struct sockaddr_in6 *sin6_1, *sin6_2;
4360 
4361 			sin6_1 = (struct sockaddr_in6 *)sa1;
4362 			sin6_2 = (struct sockaddr_in6 *)sa2;
4363 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4364 			    sin6_2));
4365 		}
4366 #endif
4367 #ifdef INET
4368 	case AF_INET:
4369 		{
4370 			/* IPv4 addresses */
4371 			struct sockaddr_in *sin_1, *sin_2;
4372 
4373 			sin_1 = (struct sockaddr_in *)sa1;
4374 			sin_2 = (struct sockaddr_in *)sa2;
4375 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4376 		}
4377 #endif
4378 	default:
4379 		/* we don't do these... */
4380 		return (0);
4381 	}
4382 }
4383 
4384 void
4385 sctp_print_address(struct sockaddr *sa)
4386 {
4387 #ifdef INET6
4388 	char ip6buf[INET6_ADDRSTRLEN];
4389 #endif
4390 
4391 	switch (sa->sa_family) {
4392 #ifdef INET6
4393 	case AF_INET6:
4394 		{
4395 			struct sockaddr_in6 *sin6;
4396 
4397 			sin6 = (struct sockaddr_in6 *)sa;
4398 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4399 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4400 			    ntohs(sin6->sin6_port),
4401 			    sin6->sin6_scope_id);
4402 			break;
4403 		}
4404 #endif
4405 #ifdef INET
4406 	case AF_INET:
4407 		{
4408 			struct sockaddr_in *sin;
4409 			unsigned char *p;
4410 
4411 			sin = (struct sockaddr_in *)sa;
4412 			p = (unsigned char *)&sin->sin_addr;
4413 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4414 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4415 			break;
4416 		}
4417 #endif
4418 	default:
4419 		SCTP_PRINTF("?\n");
4420 		break;
4421 	}
4422 }
4423 
/*
 * Move all read-queue control structures belonging to stcb from old_inp to
 * new_inp (used when an association is peeled off or accepted onto a new
 * socket).  Socket-buffer accounting is released on the old socket and
 * charged to the new one.  Silently gives up if the old socket's receive
 * buffer cannot be sb-locked, leaving the data stranded.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			/* release sb accounting for each mbuf in the chain */
			m = control->data;
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		/* charge sb accounting for each mbuf to the new socket */
		m = control->data;
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4499 
/*
 * Wake up any reader sleeping on the endpoint's socket.  On Apple /
 * lock-testing builds this must first acquire the socket lock, which
 * requires temporarily dropping the TCB lock while holding a refcnt so
 * the TCB cannot be freed underneath us.
 */
void
sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(inp);
		if (!so_locked) {
			/* lock-order dance: refcnt, drop TCB, take socket */
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			/* socket may have disappeared while unlocked */
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
4538 
/*
 * Append a queued-to-read control structure (and its mbuf chain) to the
 * endpoint's read queue, charging the data to the socket buffer sb so
 * select()/poll() see it.  Zero-length mbufs are pruned from the chain on
 * the way; if everything is pruned the control is freed instead of being
 * queued.  end != 0 marks the message complete (end_added).  The caller
 * states via inp_read_lock_held whether it already holds the INP read lock.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* reader side is closed: drop the data instead of queueing */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_free_a_readq(stcb, control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* notifications are not counted as received user messages */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			/* re-examine the (new) current mbuf without advancing */
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		/* charge this mbuf to the socket buffer */
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		sctp_free_a_readq(stcb, control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	control->on_read_q = 1;
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* finally, wake any reader sleeping on the socket */
	if (inp && inp->sctp_socket) {
		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
	}
}
4636 
4637 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4638  *************ALTERNATE ROUTING CODE
4639  */
4640 
4641 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4642  *************ALTERNATE ROUTING CODE
4643  */
4644 
4645 struct mbuf *
4646 sctp_generate_cause(uint16_t code, char *info)
4647 {
4648 	struct mbuf *m;
4649 	struct sctp_gen_error_cause *cause;
4650 	size_t info_len;
4651 	uint16_t len;
4652 
4653 	if ((code == 0) || (info == NULL)) {
4654 		return (NULL);
4655 	}
4656 	info_len = strlen(info);
4657 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4658 		return (NULL);
4659 	}
4660 	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
4661 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4662 	if (m != NULL) {
4663 		SCTP_BUF_LEN(m) = len;
4664 		cause = mtod(m, struct sctp_gen_error_cause *);
4665 		cause->code = htons(code);
4666 		cause->length = htons(len);
4667 		memcpy(cause->info, info, info_len);
4668 	}
4669 	return (m);
4670 }
4671 
4672 struct mbuf *
4673 sctp_generate_no_user_data_cause(uint32_t tsn)
4674 {
4675 	struct mbuf *m;
4676 	struct sctp_error_no_user_data *no_user_data_cause;
4677 	uint16_t len;
4678 
4679 	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
4680 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4681 	if (m != NULL) {
4682 		SCTP_BUF_LEN(m) = len;
4683 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4684 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4685 		no_user_data_cause->cause.length = htons(len);
4686 		no_user_data_cause->tsn = htonl(tsn);
4687 	}
4688 	return (m);
4689 }
4690 
4691 #ifdef SCTP_MBCNT_LOGGING
/*
 * SCTP_MBCNT_LOGGING variant: release the socket-buffer space accounted
 * to a transmit chunk (tp1), logging the decrease when MBCNT logging is
 * enabled.  Both the association's total_output_queue_size and, for
 * one-to-one style endpoints, the socket's so_snd.sb_cc are reduced by
 * the chunk's book_size, clamping at zero rather than underflowing.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* clamp at zero rather than wrapping below it */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* one-to-one style sockets also track the space in so_snd */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
4723 
4724 #endif
4725 
4726 int
4727 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4728     uint8_t sent, int so_locked
4729 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4730     SCTP_UNUSED
4731 #endif
4732 )
4733 {
4734 	struct sctp_stream_out *strq;
4735 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4736 	struct sctp_stream_queue_pending *sp;
4737 	uint32_t mid;
4738 	uint16_t sid;
4739 	uint8_t foundeom = 0;
4740 	int ret_sz = 0;
4741 	int notdone;
4742 	int do_wakeup_routine = 0;
4743 
4744 	sid = tp1->rec.data.sid;
4745 	mid = tp1->rec.data.mid;
4746 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4747 		stcb->asoc.abandoned_sent[0]++;
4748 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4749 		stcb->asoc.strmout[sid].abandoned_sent[0]++;
4750 #if defined(SCTP_DETAILED_STR_STATS)
4751 		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4752 #endif
4753 	} else {
4754 		stcb->asoc.abandoned_unsent[0]++;
4755 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4756 		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
4757 #if defined(SCTP_DETAILED_STR_STATS)
4758 		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4759 #endif
4760 	}
4761 	do {
4762 		ret_sz += tp1->book_size;
4763 		if (tp1->data != NULL) {
4764 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4765 				sctp_flight_size_decrease(tp1);
4766 				sctp_total_flight_decrease(stcb, tp1);
4767 			}
4768 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4769 			stcb->asoc.peers_rwnd += tp1->send_size;
4770 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4771 			if (sent) {
4772 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4773 			} else {
4774 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4775 			}
4776 			if (tp1->data) {
4777 				sctp_m_freem(tp1->data);
4778 				tp1->data = NULL;
4779 			}
4780 			do_wakeup_routine = 1;
4781 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4782 				stcb->asoc.sent_queue_cnt_removeable--;
4783 			}
4784 		}
4785 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4786 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4787 		    SCTP_DATA_NOT_FRAG) {
4788 			/* not frag'ed we ae done   */
4789 			notdone = 0;
4790 			foundeom = 1;
4791 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4792 			/* end of frag, we are done */
4793 			notdone = 0;
4794 			foundeom = 1;
4795 		} else {
4796 			/*
4797 			 * Its a begin or middle piece, we must mark all of
4798 			 * it
4799 			 */
4800 			notdone = 1;
4801 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4802 		}
4803 	} while (tp1 && notdone);
4804 	if (foundeom == 0) {
4805 		/*
4806 		 * The multi-part message was scattered across the send and
4807 		 * sent queue.
4808 		 */
4809 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4810 			if ((tp1->rec.data.sid != sid) ||
4811 			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
4812 				break;
4813 			}
4814 			/*
4815 			 * save to chk in case we have some on stream out
4816 			 * queue. If so and we have an un-transmitted one we
4817 			 * don't have to fudge the TSN.
4818 			 */
4819 			chk = tp1;
4820 			ret_sz += tp1->book_size;
4821 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4822 			if (sent) {
4823 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4824 			} else {
4825 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4826 			}
4827 			if (tp1->data) {
4828 				sctp_m_freem(tp1->data);
4829 				tp1->data = NULL;
4830 			}
4831 			/* No flight involved here book the size to 0 */
4832 			tp1->book_size = 0;
4833 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4834 				foundeom = 1;
4835 			}
4836 			do_wakeup_routine = 1;
4837 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4838 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4839 			/*
4840 			 * on to the sent queue so we can wait for it to be
4841 			 * passed by.
4842 			 */
4843 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4844 			    sctp_next);
4845 			stcb->asoc.send_queue_cnt--;
4846 			stcb->asoc.sent_queue_cnt++;
4847 		}
4848 	}
4849 	if (foundeom == 0) {
4850 		/*
4851 		 * Still no eom found. That means there is stuff left on the
4852 		 * stream out queue.. yuck.
4853 		 */
4854 		SCTP_TCB_SEND_LOCK(stcb);
4855 		strq = &stcb->asoc.strmout[sid];
4856 		sp = TAILQ_FIRST(&strq->outqueue);
4857 		if (sp != NULL) {
4858 			sp->discard_rest = 1;
4859 			/*
4860 			 * We may need to put a chunk on the queue that
4861 			 * holds the TSN that would have been sent with the
4862 			 * LAST bit.
4863 			 */
4864 			if (chk == NULL) {
4865 				/* Yep, we have to */
4866 				sctp_alloc_a_chunk(stcb, chk);
4867 				if (chk == NULL) {
4868 					/*
4869 					 * we are hosed. All we can do is
4870 					 * nothing.. which will cause an
4871 					 * abort if the peer is paying
4872 					 * attention.
4873 					 */
4874 					goto oh_well;
4875 				}
4876 				memset(chk, 0, sizeof(*chk));
4877 				chk->rec.data.rcv_flags = 0;
4878 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4879 				chk->asoc = &stcb->asoc;
4880 				if (stcb->asoc.idata_supported == 0) {
4881 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4882 						chk->rec.data.mid = 0;
4883 					} else {
4884 						chk->rec.data.mid = strq->next_mid_ordered;
4885 					}
4886 				} else {
4887 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4888 						chk->rec.data.mid = strq->next_mid_unordered;
4889 					} else {
4890 						chk->rec.data.mid = strq->next_mid_ordered;
4891 					}
4892 				}
4893 				chk->rec.data.sid = sp->sid;
4894 				chk->rec.data.ppid = sp->ppid;
4895 				chk->rec.data.context = sp->context;
4896 				chk->flags = sp->act_flags;
4897 				chk->whoTo = NULL;
4898 				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4899 				strq->chunks_on_queues++;
4900 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4901 				stcb->asoc.sent_queue_cnt++;
4902 				stcb->asoc.pr_sctp_cnt++;
4903 			}
4904 			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4905 			if (sp->sinfo_flags & SCTP_UNORDERED) {
4906 				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
4907 			}
4908 			if (stcb->asoc.idata_supported == 0) {
4909 				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
4910 					strq->next_mid_ordered++;
4911 				}
4912 			} else {
4913 				if (sp->sinfo_flags & SCTP_UNORDERED) {
4914 					strq->next_mid_unordered++;
4915 				} else {
4916 					strq->next_mid_ordered++;
4917 				}
4918 			}
4919 	oh_well:
4920 			if (sp->data) {
4921 				/*
4922 				 * Pull any data to free up the SB and allow
4923 				 * sender to "add more" while we will throw
4924 				 * away :-)
4925 				 */
4926 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4927 				ret_sz += sp->length;
4928 				do_wakeup_routine = 1;
4929 				sp->some_taken = 1;
4930 				sctp_m_freem(sp->data);
4931 				sp->data = NULL;
4932 				sp->tail_mbuf = NULL;
4933 				sp->length = 0;
4934 			}
4935 		}
4936 		SCTP_TCB_SEND_UNLOCK(stcb);
4937 	}
4938 	if (do_wakeup_routine) {
4939 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4940 		struct socket *so;
4941 
4942 		so = SCTP_INP_SO(stcb->sctp_ep);
4943 		if (!so_locked) {
4944 			atomic_add_int(&stcb->asoc.refcnt, 1);
4945 			SCTP_TCB_UNLOCK(stcb);
4946 			SCTP_SOCKET_LOCK(so, 1);
4947 			SCTP_TCB_LOCK(stcb);
4948 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4949 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4950 				/* assoc was freed while we were unlocked */
4951 				SCTP_SOCKET_UNLOCK(so, 1);
4952 				return (ret_sz);
4953 			}
4954 		}
4955 #endif
4956 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4957 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4958 		if (!so_locked) {
4959 			SCTP_SOCKET_UNLOCK(so, 1);
4960 		}
4961 #endif
4962 	}
4963 	return (ret_sz);
4964 }
4965 
4966 /*
4967  * checks to see if the given address, sa, is one that is currently known by
4968  * the kernel note: can't distinguish the same address on multiple interfaces
4969  * and doesn't handle multiple addresses with different zone/scope id's note:
4970  * ifa_ifwithaddr() compares the entire sockaddr struct
4971  */
4972 struct sctp_ifa *
4973 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4974     int holds_lock)
4975 {
4976 	struct sctp_laddr *laddr;
4977 
4978 	if (holds_lock == 0) {
4979 		SCTP_INP_RLOCK(inp);
4980 	}
4981 
4982 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4983 		if (laddr->ifa == NULL)
4984 			continue;
4985 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4986 			continue;
4987 #ifdef INET
4988 		if (addr->sa_family == AF_INET) {
4989 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4990 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4991 				/* found him. */
4992 				if (holds_lock == 0) {
4993 					SCTP_INP_RUNLOCK(inp);
4994 				}
4995 				return (laddr->ifa);
4996 				break;
4997 			}
4998 		}
4999 #endif
5000 #ifdef INET6
5001 		if (addr->sa_family == AF_INET6) {
5002 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5003 			    &laddr->ifa->address.sin6)) {
5004 				/* found him. */
5005 				if (holds_lock == 0) {
5006 					SCTP_INP_RUNLOCK(inp);
5007 				}
5008 				return (laddr->ifa);
5009 				break;
5010 			}
5011 		}
5012 #endif
5013 	}
5014 	if (holds_lock == 0) {
5015 		SCTP_INP_RUNLOCK(inp);
5016 	}
5017 	return (NULL);
5018 }
5019 
/*
 * Compute a 32-bit hash over the network address portion of 'addr'.
 * Only AF_INET and AF_INET6 addresses are hashed; any other family
 * (or a family compiled out of the kernel) yields 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
#ifdef INET
	if (addr->sa_family == AF_INET) {
		struct sockaddr_in *sin = (struct sockaddr_in *)addr;
		uint32_t v4 = sin->sin_addr.s_addr;

		/* Fold the high 16 bits of the address into the low 16. */
		return (v4 ^ (v4 >> 16));
	}
#endif
#ifdef INET6
	if (addr->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
		uint32_t sum;

		/* Sum the four 32-bit words, then fold the high half in. */
		sum = sin6->sin6_addr.s6_addr32[0] +
		    sin6->sin6_addr.s6_addr32[1] +
		    sin6->sin6_addr.s6_addr32[2] +
		    sin6->sin6_addr.s6_addr32[3];
		return (sum ^ (sum >> 16));
	}
#endif
	return (0);
}
5053 
5054 struct sctp_ifa *
5055 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5056 {
5057 	struct sctp_ifa *sctp_ifap;
5058 	struct sctp_vrf *vrf;
5059 	struct sctp_ifalist *hash_head;
5060 	uint32_t hash_of_addr;
5061 
5062 	if (holds_lock == 0)
5063 		SCTP_IPI_ADDR_RLOCK();
5064 
5065 	vrf = sctp_find_vrf(vrf_id);
5066 	if (vrf == NULL) {
5067 		if (holds_lock == 0)
5068 			SCTP_IPI_ADDR_RUNLOCK();
5069 		return (NULL);
5070 	}
5071 
5072 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5073 
5074 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5075 	if (hash_head == NULL) {
5076 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5077 		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5078 		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5079 		sctp_print_address(addr);
5080 		SCTP_PRINTF("No such bucket for address\n");
5081 		if (holds_lock == 0)
5082 			SCTP_IPI_ADDR_RUNLOCK();
5083 
5084 		return (NULL);
5085 	}
5086 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5087 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5088 			continue;
5089 #ifdef INET
5090 		if (addr->sa_family == AF_INET) {
5091 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5092 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5093 				/* found him. */
5094 				if (holds_lock == 0)
5095 					SCTP_IPI_ADDR_RUNLOCK();
5096 				return (sctp_ifap);
5097 				break;
5098 			}
5099 		}
5100 #endif
5101 #ifdef INET6
5102 		if (addr->sa_family == AF_INET6) {
5103 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5104 			    &sctp_ifap->address.sin6)) {
5105 				/* found him. */
5106 				if (holds_lock == 0)
5107 					SCTP_IPI_ADDR_RUNLOCK();
5108 				return (sctp_ifap);
5109 				break;
5110 			}
5111 		}
5112 #endif
5113 	}
5114 	if (holds_lock == 0)
5115 		SCTP_IPI_ADDR_RUNLOCK();
5116 	return (NULL);
5117 }
5118 
/*
 * The user pulled *freed_so_far bytes from the socket buffer; decide
 * whether the peer should be told about the newly opened receive window.
 * If the window has grown by at least rwnd_req bytes since the last
 * report, send a window-update SACK (and any pending output) right away;
 * otherwise just remember the difference in stcb->freed_by_sorcv_sincelast
 * for a later call.
 *
 * hold_rlock is non-zero when the caller holds the INP read-queue lock;
 * that lock is dropped around the TCB-locked SACK send and re-taken
 * before returning.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Hold a reference so the association can't be freed under us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check: if the assoc is being freed or shutting down, no update. */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's count into the running total and reset it. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		/* Window shrank; nothing new to report. */
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/*
		 * Worth reporting.  Drop the read-queue lock (if held) so
		 * the TCB lock can be taken without violating lock order.
		 */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* Re-check under the TCB lock: no reports here. */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-acquire the read-queue lock if we dropped it above. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}

	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	/* Drop the reference taken on entry. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5202 
5203 int
5204 sctp_sorecvmsg(struct socket *so,
5205     struct uio *uio,
5206     struct mbuf **mp,
5207     struct sockaddr *from,
5208     int fromlen,
5209     int *msg_flags,
5210     struct sctp_sndrcvinfo *sinfo,
5211     int filling_sinfo)
5212 {
5213 	/*
5214 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5215 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5216 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5217 	 * On the way out we may send out any combination of:
5218 	 * MSG_NOTIFICATION MSG_EOR
5219 	 *
5220 	 */
5221 	struct sctp_inpcb *inp = NULL;
5222 	int my_len = 0;
5223 	int cp_len = 0, error = 0;
5224 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5225 	struct mbuf *m = NULL;
5226 	struct sctp_tcb *stcb = NULL;
5227 	int wakeup_read_socket = 0;
5228 	int freecnt_applied = 0;
5229 	int out_flags = 0, in_flags = 0;
5230 	int block_allowed = 1;
5231 	uint32_t freed_so_far = 0;
5232 	uint32_t copied_so_far = 0;
5233 	int in_eeor_mode = 0;
5234 	int no_rcv_needed = 0;
5235 	uint32_t rwnd_req = 0;
5236 	int hold_sblock = 0;
5237 	int hold_rlock = 0;
5238 	ssize_t slen = 0;
5239 	uint32_t held_length = 0;
5240 	int sockbuf_lock = 0;
5241 
5242 	if (uio == NULL) {
5243 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5244 		return (EINVAL);
5245 	}
5246 
5247 	if (msg_flags) {
5248 		in_flags = *msg_flags;
5249 		if (in_flags & MSG_PEEK)
5250 			SCTP_STAT_INCR(sctps_read_peeks);
5251 	} else {
5252 		in_flags = 0;
5253 	}
5254 	slen = uio->uio_resid;
5255 
5256 	/* Pull in and set up our int flags */
5257 	if (in_flags & MSG_OOB) {
5258 		/* Out of band's NOT supported */
5259 		return (EOPNOTSUPP);
5260 	}
5261 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5262 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5263 		return (EINVAL);
5264 	}
5265 	if ((in_flags & (MSG_DONTWAIT
5266 	    | MSG_NBIO
5267 	    )) ||
5268 	    SCTP_SO_IS_NBIO(so)) {
5269 		block_allowed = 0;
5270 	}
5271 	/* setup the endpoint */
5272 	inp = (struct sctp_inpcb *)so->so_pcb;
5273 	if (inp == NULL) {
5274 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5275 		return (EFAULT);
5276 	}
5277 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5278 	/* Must be at least a MTU's worth */
5279 	if (rwnd_req < SCTP_MIN_RWND)
5280 		rwnd_req = SCTP_MIN_RWND;
5281 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5282 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5283 		sctp_misc_ints(SCTP_SORECV_ENTER,
5284 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5285 	}
5286 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5287 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5288 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5289 	}
5290 
5291 
5292 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5293 	if (error) {
5294 		goto release_unlocked;
5295 	}
5296 	sockbuf_lock = 1;
5297 restart:
5298 
5299 
5300 restart_nosblocks:
5301 	if (hold_sblock == 0) {
5302 		SOCKBUF_LOCK(&so->so_rcv);
5303 		hold_sblock = 1;
5304 	}
5305 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5306 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5307 		goto out;
5308 	}
5309 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5310 		if (so->so_error) {
5311 			error = so->so_error;
5312 			if ((in_flags & MSG_PEEK) == 0)
5313 				so->so_error = 0;
5314 			goto out;
5315 		} else {
5316 			if (so->so_rcv.sb_cc == 0) {
5317 				/* indicate EOF */
5318 				error = 0;
5319 				goto out;
5320 			}
5321 		}
5322 	}
5323 	if (so->so_rcv.sb_cc <= held_length) {
5324 		if (so->so_error) {
5325 			error = so->so_error;
5326 			if ((in_flags & MSG_PEEK) == 0) {
5327 				so->so_error = 0;
5328 			}
5329 			goto out;
5330 		}
5331 		if ((so->so_rcv.sb_cc == 0) &&
5332 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5333 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5334 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5335 				/*
5336 				 * For active open side clear flags for
5337 				 * re-use passive open is blocked by
5338 				 * connect.
5339 				 */
5340 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5341 					/*
5342 					 * You were aborted, passive side
5343 					 * always hits here
5344 					 */
5345 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5346 					error = ECONNRESET;
5347 				}
5348 				so->so_state &= ~(SS_ISCONNECTING |
5349 				    SS_ISDISCONNECTING |
5350 				    SS_ISCONFIRMING |
5351 				    SS_ISCONNECTED);
5352 				if (error == 0) {
5353 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5354 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5355 						error = ENOTCONN;
5356 					}
5357 				}
5358 				goto out;
5359 			}
5360 		}
5361 		if (block_allowed) {
5362 			error = sbwait(&so->so_rcv);
5363 			if (error) {
5364 				goto out;
5365 			}
5366 			held_length = 0;
5367 			goto restart_nosblocks;
5368 		} else {
5369 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5370 			error = EWOULDBLOCK;
5371 			goto out;
5372 		}
5373 	}
5374 	if (hold_sblock == 1) {
5375 		SOCKBUF_UNLOCK(&so->so_rcv);
5376 		hold_sblock = 0;
5377 	}
5378 	/* we possibly have data we can read */
5379 	/* sa_ignore FREED_MEMORY */
5380 	control = TAILQ_FIRST(&inp->read_queue);
5381 	if (control == NULL) {
5382 		/*
5383 		 * This could be happening since the appender did the
5384 		 * increment but as not yet did the tailq insert onto the
5385 		 * read_queue
5386 		 */
5387 		if (hold_rlock == 0) {
5388 			SCTP_INP_READ_LOCK(inp);
5389 		}
5390 		control = TAILQ_FIRST(&inp->read_queue);
5391 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5392 #ifdef INVARIANTS
5393 			panic("Huh, its non zero and nothing on control?");
5394 #endif
5395 			so->so_rcv.sb_cc = 0;
5396 		}
5397 		SCTP_INP_READ_UNLOCK(inp);
5398 		hold_rlock = 0;
5399 		goto restart;
5400 	}
5401 
5402 	if ((control->length == 0) &&
5403 	    (control->do_not_ref_stcb)) {
5404 		/*
5405 		 * Clean up code for freeing assoc that left behind a
5406 		 * pdapi.. maybe a peer in EEOR that just closed after
5407 		 * sending and never indicated a EOR.
5408 		 */
5409 		if (hold_rlock == 0) {
5410 			hold_rlock = 1;
5411 			SCTP_INP_READ_LOCK(inp);
5412 		}
5413 		control->held_length = 0;
5414 		if (control->data) {
5415 			/* Hmm there is data here .. fix */
5416 			struct mbuf *m_tmp;
5417 			int cnt = 0;
5418 
5419 			m_tmp = control->data;
5420 			while (m_tmp) {
5421 				cnt += SCTP_BUF_LEN(m_tmp);
5422 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5423 					control->tail_mbuf = m_tmp;
5424 					control->end_added = 1;
5425 				}
5426 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5427 			}
5428 			control->length = cnt;
5429 		} else {
5430 			/* remove it */
5431 			TAILQ_REMOVE(&inp->read_queue, control, next);
5432 			/* Add back any hiddend data */
5433 			sctp_free_remote_addr(control->whoFrom);
5434 			sctp_free_a_readq(stcb, control);
5435 		}
5436 		if (hold_rlock) {
5437 			hold_rlock = 0;
5438 			SCTP_INP_READ_UNLOCK(inp);
5439 		}
5440 		goto restart;
5441 	}
5442 	if ((control->length == 0) &&
5443 	    (control->end_added == 1)) {
5444 		/*
5445 		 * Do we also need to check for (control->pdapi_aborted ==
5446 		 * 1)?
5447 		 */
5448 		if (hold_rlock == 0) {
5449 			hold_rlock = 1;
5450 			SCTP_INP_READ_LOCK(inp);
5451 		}
5452 		TAILQ_REMOVE(&inp->read_queue, control, next);
5453 		if (control->data) {
5454 #ifdef INVARIANTS
5455 			panic("control->data not null but control->length == 0");
5456 #else
5457 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5458 			sctp_m_freem(control->data);
5459 			control->data = NULL;
5460 #endif
5461 		}
5462 		if (control->aux_data) {
5463 			sctp_m_free(control->aux_data);
5464 			control->aux_data = NULL;
5465 		}
5466 #ifdef INVARIANTS
5467 		if (control->on_strm_q) {
5468 			panic("About to free ctl:%p so:%p and its in %d",
5469 			    control, so, control->on_strm_q);
5470 		}
5471 #endif
5472 		sctp_free_remote_addr(control->whoFrom);
5473 		sctp_free_a_readq(stcb, control);
5474 		if (hold_rlock) {
5475 			hold_rlock = 0;
5476 			SCTP_INP_READ_UNLOCK(inp);
5477 		}
5478 		goto restart;
5479 	}
5480 	if (control->length == 0) {
5481 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5482 		    (filling_sinfo)) {
5483 			/* find a more suitable one then this */
5484 			ctl = TAILQ_NEXT(control, next);
5485 			while (ctl) {
5486 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5487 				    (ctl->some_taken ||
5488 				    (ctl->spec_flags & M_NOTIFICATION) ||
5489 				    ((ctl->do_not_ref_stcb == 0) &&
5490 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5491 				    ) {
5492 					/*-
5493 					 * If we have a different TCB next, and there is data
5494 					 * present. If we have already taken some (pdapi), OR we can
5495 					 * ref the tcb and no delivery as started on this stream, we
5496 					 * take it. Note we allow a notification on a different
5497 					 * assoc to be delivered..
5498 					 */
5499 					control = ctl;
5500 					goto found_one;
5501 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5502 					    (ctl->length) &&
5503 					    ((ctl->some_taken) ||
5504 					    ((ctl->do_not_ref_stcb == 0) &&
5505 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5506 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5507 					/*-
5508 					 * If we have the same tcb, and there is data present, and we
5509 					 * have the strm interleave feature present. Then if we have
5510 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5511 					 * not started a delivery for this stream, we can take it.
5512 					 * Note we do NOT allow a notificaiton on the same assoc to
5513 					 * be delivered.
5514 					 */
5515 					control = ctl;
5516 					goto found_one;
5517 				}
5518 				ctl = TAILQ_NEXT(ctl, next);
5519 			}
5520 		}
5521 		/*
5522 		 * if we reach here, not suitable replacement is available
5523 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5524 		 * into the our held count, and its time to sleep again.
5525 		 */
5526 		held_length = so->so_rcv.sb_cc;
5527 		control->held_length = so->so_rcv.sb_cc;
5528 		goto restart;
5529 	}
5530 	/* Clear the held length since there is something to read */
5531 	control->held_length = 0;
5532 found_one:
5533 	/*
5534 	 * If we reach here, control has a some data for us to read off.
5535 	 * Note that stcb COULD be NULL.
5536 	 */
5537 	if (hold_rlock == 0) {
5538 		hold_rlock = 1;
5539 		SCTP_INP_READ_LOCK(inp);
5540 	}
5541 	control->some_taken++;
5542 	stcb = control->stcb;
5543 	if (stcb) {
5544 		if ((control->do_not_ref_stcb == 0) &&
5545 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5546 			if (freecnt_applied == 0)
5547 				stcb = NULL;
5548 		} else if (control->do_not_ref_stcb == 0) {
5549 			/* you can't free it on me please */
5550 			/*
5551 			 * The lock on the socket buffer protects us so the
5552 			 * free code will stop. But since we used the
5553 			 * socketbuf lock and the sender uses the tcb_lock
5554 			 * to increment, we need to use the atomic add to
5555 			 * the refcnt
5556 			 */
5557 			if (freecnt_applied) {
5558 #ifdef INVARIANTS
5559 				panic("refcnt already incremented");
5560 #else
5561 				SCTP_PRINTF("refcnt already incremented?\n");
5562 #endif
5563 			} else {
5564 				atomic_add_int(&stcb->asoc.refcnt, 1);
5565 				freecnt_applied = 1;
5566 			}
5567 			/*
5568 			 * Setup to remember how much we have not yet told
5569 			 * the peer our rwnd has opened up. Note we grab the
5570 			 * value from the tcb from last time. Note too that
5571 			 * sack sending clears this when a sack is sent,
5572 			 * which is fine. Once we hit the rwnd_req, we then
5573 			 * will go to the sctp_user_rcvd() that will not
5574 			 * lock until it KNOWs it MUST send a WUP-SACK.
5575 			 */
5576 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5577 			stcb->freed_by_sorcv_sincelast = 0;
5578 		}
5579 	}
5580 	if (stcb &&
5581 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5582 	    control->do_not_ref_stcb == 0) {
5583 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5584 	}
5585 
5586 	/* First lets get off the sinfo and sockaddr info */
5587 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5588 		sinfo->sinfo_stream = control->sinfo_stream;
5589 		sinfo->sinfo_ssn = (uint16_t)control->mid;
5590 		sinfo->sinfo_flags = control->sinfo_flags;
5591 		sinfo->sinfo_ppid = control->sinfo_ppid;
5592 		sinfo->sinfo_context = control->sinfo_context;
5593 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5594 		sinfo->sinfo_tsn = control->sinfo_tsn;
5595 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5596 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5597 		nxt = TAILQ_NEXT(control, next);
5598 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5599 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5600 			struct sctp_extrcvinfo *s_extra;
5601 
5602 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5603 			if ((nxt) &&
5604 			    (nxt->length)) {
5605 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5606 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5607 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5608 				}
5609 				if (nxt->spec_flags & M_NOTIFICATION) {
5610 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5611 				}
5612 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5613 				s_extra->serinfo_next_length = nxt->length;
5614 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5615 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5616 				if (nxt->tail_mbuf != NULL) {
5617 					if (nxt->end_added) {
5618 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5619 					}
5620 				}
5621 			} else {
5622 				/*
5623 				 * we explicitly 0 this, since the memcpy
5624 				 * got some other things beyond the older
5625 				 * sinfo_ that is on the control's structure
5626 				 * :-D
5627 				 */
5628 				nxt = NULL;
5629 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5630 				s_extra->serinfo_next_aid = 0;
5631 				s_extra->serinfo_next_length = 0;
5632 				s_extra->serinfo_next_ppid = 0;
5633 				s_extra->serinfo_next_stream = 0;
5634 			}
5635 		}
5636 		/*
5637 		 * update off the real current cum-ack, if we have an stcb.
5638 		 */
5639 		if ((control->do_not_ref_stcb == 0) && stcb)
5640 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5641 		/*
5642 		 * mask off the high bits, we keep the actual chunk bits in
5643 		 * there.
5644 		 */
5645 		sinfo->sinfo_flags &= 0x00ff;
5646 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5647 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5648 		}
5649 	}
5650 #ifdef SCTP_ASOCLOG_OF_TSNS
5651 	{
5652 		int index, newindex;
5653 		struct sctp_pcbtsn_rlog *entry;
5654 
5655 		do {
5656 			index = inp->readlog_index;
5657 			newindex = index + 1;
5658 			if (newindex >= SCTP_READ_LOG_SIZE) {
5659 				newindex = 0;
5660 			}
5661 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5662 		entry = &inp->readlog[index];
5663 		entry->vtag = control->sinfo_assoc_id;
5664 		entry->strm = control->sinfo_stream;
5665 		entry->seq = (uint16_t)control->mid;
5666 		entry->sz = control->length;
5667 		entry->flgs = control->sinfo_flags;
5668 	}
5669 #endif
5670 	if ((fromlen > 0) && (from != NULL)) {
5671 		union sctp_sockstore store;
5672 		size_t len;
5673 
5674 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5675 #ifdef INET6
5676 		case AF_INET6:
5677 			len = sizeof(struct sockaddr_in6);
5678 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5679 			store.sin6.sin6_port = control->port_from;
5680 			break;
5681 #endif
5682 #ifdef INET
5683 		case AF_INET:
5684 #ifdef INET6
5685 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5686 				len = sizeof(struct sockaddr_in6);
5687 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5688 				    &store.sin6);
5689 				store.sin6.sin6_port = control->port_from;
5690 			} else {
5691 				len = sizeof(struct sockaddr_in);
5692 				store.sin = control->whoFrom->ro._l_addr.sin;
5693 				store.sin.sin_port = control->port_from;
5694 			}
5695 #else
5696 			len = sizeof(struct sockaddr_in);
5697 			store.sin = control->whoFrom->ro._l_addr.sin;
5698 			store.sin.sin_port = control->port_from;
5699 #endif
5700 			break;
5701 #endif
5702 		default:
5703 			len = 0;
5704 			break;
5705 		}
5706 		memcpy(from, &store, min((size_t)fromlen, len));
5707 #ifdef INET6
5708 		{
5709 			struct sockaddr_in6 lsa6, *from6;
5710 
5711 			from6 = (struct sockaddr_in6 *)from;
5712 			sctp_recover_scope_mac(from6, (&lsa6));
5713 		}
5714 #endif
5715 	}
5716 	if (hold_rlock) {
5717 		SCTP_INP_READ_UNLOCK(inp);
5718 		hold_rlock = 0;
5719 	}
5720 	if (hold_sblock) {
5721 		SOCKBUF_UNLOCK(&so->so_rcv);
5722 		hold_sblock = 0;
5723 	}
5724 	/* now copy out what data we can */
5725 	if (mp == NULL) {
5726 		/* copy out each mbuf in the chain up to length */
5727 get_more_data:
5728 		m = control->data;
5729 		while (m) {
5730 			/* Move out all we can */
5731 			cp_len = (int)uio->uio_resid;
5732 			my_len = (int)SCTP_BUF_LEN(m);
5733 			if (cp_len > my_len) {
5734 				/* not enough in this buf */
5735 				cp_len = my_len;
5736 			}
5737 			if (hold_rlock) {
5738 				SCTP_INP_READ_UNLOCK(inp);
5739 				hold_rlock = 0;
5740 			}
5741 			if (cp_len > 0)
5742 				error = uiomove(mtod(m, char *), cp_len, uio);
5743 			/* re-read */
5744 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5745 				goto release;
5746 			}
5747 
5748 			if ((control->do_not_ref_stcb == 0) && stcb &&
5749 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5750 				no_rcv_needed = 1;
5751 			}
5752 			if (error) {
5753 				/* error we are out of here */
5754 				goto release;
5755 			}
5756 			SCTP_INP_READ_LOCK(inp);
5757 			hold_rlock = 1;
5758 			if (cp_len == SCTP_BUF_LEN(m)) {
5759 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5760 				    (control->end_added)) {
5761 					out_flags |= MSG_EOR;
5762 					if ((control->do_not_ref_stcb == 0) &&
5763 					    (control->stcb != NULL) &&
5764 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5765 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5766 				}
5767 				if (control->spec_flags & M_NOTIFICATION) {
5768 					out_flags |= MSG_NOTIFICATION;
5769 				}
5770 				/* we ate up the mbuf */
5771 				if (in_flags & MSG_PEEK) {
5772 					/* just looking */
5773 					m = SCTP_BUF_NEXT(m);
5774 					copied_so_far += cp_len;
5775 				} else {
5776 					/* dispose of the mbuf */
5777 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5778 						sctp_sblog(&so->so_rcv,
5779 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5780 					}
5781 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5782 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5783 						sctp_sblog(&so->so_rcv,
5784 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5785 					}
5786 					copied_so_far += cp_len;
5787 					freed_so_far += cp_len;
5788 					freed_so_far += MSIZE;
5789 					atomic_subtract_int(&control->length, cp_len);
5790 					control->data = sctp_m_free(m);
5791 					m = control->data;
5792 					/*
5793 					 * been through it all, must hold sb
5794 					 * lock ok to null tail
5795 					 */
5796 					if (control->data == NULL) {
5797 #ifdef INVARIANTS
5798 						if ((control->end_added == 0) ||
5799 						    (TAILQ_NEXT(control, next) == NULL)) {
5800 							/*
5801 							 * If the end is not
5802 							 * added, OR the
5803 							 * next is NOT null
5804 							 * we MUST have the
5805 							 * lock.
5806 							 */
5807 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5808 								panic("Hmm we don't own the lock?");
5809 							}
5810 						}
5811 #endif
5812 						control->tail_mbuf = NULL;
5813 #ifdef INVARIANTS
5814 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5815 							panic("end_added, nothing left and no MSG_EOR");
5816 						}
5817 #endif
5818 					}
5819 				}
5820 			} else {
5821 				/* Do we need to trim the mbuf? */
5822 				if (control->spec_flags & M_NOTIFICATION) {
5823 					out_flags |= MSG_NOTIFICATION;
5824 				}
5825 				if ((in_flags & MSG_PEEK) == 0) {
5826 					SCTP_BUF_RESV_UF(m, cp_len);
5827 					SCTP_BUF_LEN(m) -= cp_len;
5828 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5829 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5830 					}
5831 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5832 					if ((control->do_not_ref_stcb == 0) &&
5833 					    stcb) {
5834 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5835 					}
5836 					copied_so_far += cp_len;
5837 					freed_so_far += cp_len;
5838 					freed_so_far += MSIZE;
5839 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5840 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5841 						    SCTP_LOG_SBRESULT, 0);
5842 					}
5843 					atomic_subtract_int(&control->length, cp_len);
5844 				} else {
5845 					copied_so_far += cp_len;
5846 				}
5847 			}
5848 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5849 				break;
5850 			}
5851 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5852 			    (control->do_not_ref_stcb == 0) &&
5853 			    (freed_so_far >= rwnd_req)) {
5854 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5855 			}
5856 		}		/* end while(m) */
5857 		/*
5858 		 * At this point we have looked at it all and we either have
5859 		 * a MSG_EOR/or read all the user wants... <OR>
5860 		 * control->length == 0.
5861 		 */
5862 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5863 			/* we are done with this control */
5864 			if (control->length == 0) {
5865 				if (control->data) {
5866 #ifdef INVARIANTS
5867 					panic("control->data not null at read eor?");
5868 #else
5869 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5870 					sctp_m_freem(control->data);
5871 					control->data = NULL;
5872 #endif
5873 				}
5874 		done_with_control:
5875 				if (hold_rlock == 0) {
5876 					SCTP_INP_READ_LOCK(inp);
5877 					hold_rlock = 1;
5878 				}
5879 				TAILQ_REMOVE(&inp->read_queue, control, next);
5880 				/* Add back any hiddend data */
5881 				if (control->held_length) {
5882 					held_length = 0;
5883 					control->held_length = 0;
5884 					wakeup_read_socket = 1;
5885 				}
5886 				if (control->aux_data) {
5887 					sctp_m_free(control->aux_data);
5888 					control->aux_data = NULL;
5889 				}
5890 				no_rcv_needed = control->do_not_ref_stcb;
5891 				sctp_free_remote_addr(control->whoFrom);
5892 				control->data = NULL;
5893 #ifdef INVARIANTS
5894 				if (control->on_strm_q) {
5895 					panic("About to free ctl:%p so:%p and its in %d",
5896 					    control, so, control->on_strm_q);
5897 				}
5898 #endif
5899 				sctp_free_a_readq(stcb, control);
5900 				control = NULL;
5901 				if ((freed_so_far >= rwnd_req) &&
5902 				    (no_rcv_needed == 0))
5903 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5904 
5905 			} else {
5906 				/*
5907 				 * The user did not read all of this
5908 				 * message, turn off the returned MSG_EOR
5909 				 * since we are leaving more behind on the
5910 				 * control to read.
5911 				 */
5912 #ifdef INVARIANTS
5913 				if (control->end_added &&
5914 				    (control->data == NULL) &&
5915 				    (control->tail_mbuf == NULL)) {
5916 					panic("Gak, control->length is corrupt?");
5917 				}
5918 #endif
5919 				no_rcv_needed = control->do_not_ref_stcb;
5920 				out_flags &= ~MSG_EOR;
5921 			}
5922 		}
5923 		if (out_flags & MSG_EOR) {
5924 			goto release;
5925 		}
5926 		if ((uio->uio_resid == 0) ||
5927 		    ((in_eeor_mode) &&
5928 		    (copied_so_far >= (uint32_t)max(so->so_rcv.sb_lowat, 1)))) {
5929 			goto release;
5930 		}
5931 		/*
5932 		 * If I hit here the receiver wants more and this message is
5933 		 * NOT done (pd-api). So two questions. Can we block? if not
5934 		 * we are done. Did the user NOT set MSG_WAITALL?
5935 		 */
5936 		if (block_allowed == 0) {
5937 			goto release;
5938 		}
5939 		/*
5940 		 * We need to wait for more data a few things: - We don't
5941 		 * sbunlock() so we don't get someone else reading. - We
5942 		 * must be sure to account for the case where what is added
5943 		 * is NOT to our control when we wakeup.
5944 		 */
5945 
5946 		/*
5947 		 * Do we need to tell the transport a rwnd update might be
5948 		 * needed before we go to sleep?
5949 		 */
5950 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5951 		    ((freed_so_far >= rwnd_req) &&
5952 		    (control->do_not_ref_stcb == 0) &&
5953 		    (no_rcv_needed == 0))) {
5954 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5955 		}
5956 wait_some_more:
5957 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5958 			goto release;
5959 		}
5960 
5961 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5962 			goto release;
5963 
5964 		if (hold_rlock == 1) {
5965 			SCTP_INP_READ_UNLOCK(inp);
5966 			hold_rlock = 0;
5967 		}
5968 		if (hold_sblock == 0) {
5969 			SOCKBUF_LOCK(&so->so_rcv);
5970 			hold_sblock = 1;
5971 		}
5972 		if ((copied_so_far) && (control->length == 0) &&
5973 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5974 			goto release;
5975 		}
5976 		if (so->so_rcv.sb_cc <= control->held_length) {
5977 			error = sbwait(&so->so_rcv);
5978 			if (error) {
5979 				goto release;
5980 			}
5981 			control->held_length = 0;
5982 		}
5983 		if (hold_sblock) {
5984 			SOCKBUF_UNLOCK(&so->so_rcv);
5985 			hold_sblock = 0;
5986 		}
5987 		if (control->length == 0) {
5988 			/* still nothing here */
5989 			if (control->end_added == 1) {
5990 				/* he aborted, or is done i.e.did a shutdown */
5991 				out_flags |= MSG_EOR;
5992 				if (control->pdapi_aborted) {
5993 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5994 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5995 
5996 					out_flags |= MSG_TRUNC;
5997 				} else {
5998 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5999 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6000 				}
6001 				goto done_with_control;
6002 			}
6003 			if (so->so_rcv.sb_cc > held_length) {
6004 				control->held_length = so->so_rcv.sb_cc;
6005 				held_length = 0;
6006 			}
6007 			goto wait_some_more;
6008 		} else if (control->data == NULL) {
6009 			/*
6010 			 * we must re-sync since data is probably being
6011 			 * added
6012 			 */
6013 			SCTP_INP_READ_LOCK(inp);
6014 			if ((control->length > 0) && (control->data == NULL)) {
6015 				/*
6016 				 * big trouble.. we have the lock and its
6017 				 * corrupt?
6018 				 */
6019 #ifdef INVARIANTS
6020 				panic("Impossible data==NULL length !=0");
6021 #endif
6022 				out_flags |= MSG_EOR;
6023 				out_flags |= MSG_TRUNC;
6024 				control->length = 0;
6025 				SCTP_INP_READ_UNLOCK(inp);
6026 				goto done_with_control;
6027 			}
6028 			SCTP_INP_READ_UNLOCK(inp);
6029 			/* We will fall around to get more data */
6030 		}
6031 		goto get_more_data;
6032 	} else {
6033 		/*-
6034 		 * Give caller back the mbuf chain,
6035 		 * store in uio_resid the length
6036 		 */
6037 		wakeup_read_socket = 0;
6038 		if ((control->end_added == 0) ||
6039 		    (TAILQ_NEXT(control, next) == NULL)) {
6040 			/* Need to get rlock */
6041 			if (hold_rlock == 0) {
6042 				SCTP_INP_READ_LOCK(inp);
6043 				hold_rlock = 1;
6044 			}
6045 		}
6046 		if (control->end_added) {
6047 			out_flags |= MSG_EOR;
6048 			if ((control->do_not_ref_stcb == 0) &&
6049 			    (control->stcb != NULL) &&
6050 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6051 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6052 		}
6053 		if (control->spec_flags & M_NOTIFICATION) {
6054 			out_flags |= MSG_NOTIFICATION;
6055 		}
6056 		uio->uio_resid = control->length;
6057 		*mp = control->data;
6058 		m = control->data;
6059 		while (m) {
6060 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6061 				sctp_sblog(&so->so_rcv,
6062 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6063 			}
6064 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6065 			freed_so_far += SCTP_BUF_LEN(m);
6066 			freed_so_far += MSIZE;
6067 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6068 				sctp_sblog(&so->so_rcv,
6069 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6070 			}
6071 			m = SCTP_BUF_NEXT(m);
6072 		}
6073 		control->data = control->tail_mbuf = NULL;
6074 		control->length = 0;
6075 		if (out_flags & MSG_EOR) {
6076 			/* Done with this control */
6077 			goto done_with_control;
6078 		}
6079 	}
6080 release:
6081 	if (hold_rlock == 1) {
6082 		SCTP_INP_READ_UNLOCK(inp);
6083 		hold_rlock = 0;
6084 	}
6085 	if (hold_sblock == 1) {
6086 		SOCKBUF_UNLOCK(&so->so_rcv);
6087 		hold_sblock = 0;
6088 	}
6089 
6090 	sbunlock(&so->so_rcv);
6091 	sockbuf_lock = 0;
6092 
6093 release_unlocked:
6094 	if (hold_sblock) {
6095 		SOCKBUF_UNLOCK(&so->so_rcv);
6096 		hold_sblock = 0;
6097 	}
6098 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6099 		if ((freed_so_far >= rwnd_req) &&
6100 		    (control && (control->do_not_ref_stcb == 0)) &&
6101 		    (no_rcv_needed == 0))
6102 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6103 	}
6104 out:
6105 	if (msg_flags) {
6106 		*msg_flags = out_flags;
6107 	}
6108 	if (((out_flags & MSG_EOR) == 0) &&
6109 	    ((in_flags & MSG_PEEK) == 0) &&
6110 	    (sinfo) &&
6111 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6112 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6113 		struct sctp_extrcvinfo *s_extra;
6114 
6115 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6116 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6117 	}
6118 	if (hold_rlock == 1) {
6119 		SCTP_INP_READ_UNLOCK(inp);
6120 	}
6121 	if (hold_sblock) {
6122 		SOCKBUF_UNLOCK(&so->so_rcv);
6123 	}
6124 	if (sockbuf_lock) {
6125 		sbunlock(&so->so_rcv);
6126 	}
6127 
6128 	if (freecnt_applied) {
6129 		/*
6130 		 * The lock on the socket buffer protects us so the free
6131 		 * code will stop. But since we used the socketbuf lock and
6132 		 * the sender uses the tcb_lock to increment, we need to use
6133 		 * the atomic add to the refcnt.
6134 		 */
6135 		if (stcb == NULL) {
6136 #ifdef INVARIANTS
6137 			panic("stcb for refcnt has gone NULL?");
6138 			goto stage_left;
6139 #else
6140 			goto stage_left;
6141 #endif
6142 		}
6143 		/* Save the value back for next time */
6144 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6145 		atomic_add_int(&stcb->asoc.refcnt, -1);
6146 	}
6147 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6148 		if (stcb) {
6149 			sctp_misc_ints(SCTP_SORECV_DONE,
6150 			    freed_so_far,
6151 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6152 			    stcb->asoc.my_rwnd,
6153 			    so->so_rcv.sb_cc);
6154 		} else {
6155 			sctp_misc_ints(SCTP_SORECV_DONE,
6156 			    freed_so_far,
6157 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6158 			    0,
6159 			    so->so_rcv.sb_cc);
6160 		}
6161 	}
6162 stage_left:
6163 	if (wakeup_read_socket) {
6164 		sctp_sorwakeup(inp, so);
6165 	}
6166 	return (error);
6167 }
6168 
6169 
6170 #ifdef SCTP_MBUF_LOGGING
6171 struct mbuf *
6172 sctp_m_free(struct mbuf *m)
6173 {
6174 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6175 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6176 	}
6177 	return (m_free(m));
6178 }
6179 
6180 void
6181 sctp_m_freem(struct mbuf *mb)
6182 {
6183 	while (mb != NULL)
6184 		mb = sctp_m_free(mb);
6185 }
6186 
6187 #endif
6188 
6189 int
6190 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6191 {
6192 	/*
6193 	 * Given a local address. For all associations that holds the
6194 	 * address, request a peer-set-primary.
6195 	 */
6196 	struct sctp_ifa *ifa;
6197 	struct sctp_laddr *wi;
6198 
6199 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6200 	if (ifa == NULL) {
6201 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6202 		return (EADDRNOTAVAIL);
6203 	}
6204 	/*
6205 	 * Now that we have the ifa we must awaken the iterator with this
6206 	 * message.
6207 	 */
6208 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6209 	if (wi == NULL) {
6210 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6211 		return (ENOMEM);
6212 	}
6213 	/* Now incr the count and int wi structure */
6214 	SCTP_INCR_LADDR_COUNT();
6215 	memset(wi, 0, sizeof(*wi));
6216 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6217 	wi->ifa = ifa;
6218 	wi->action = SCTP_SET_PRIM_ADDR;
6219 	atomic_add_int(&ifa->refcount, 1);
6220 
6221 	/* Now add it to the work queue */
6222 	SCTP_WQ_ADDR_LOCK();
6223 	/*
6224 	 * Should this really be a tailq? As it is we will process the
6225 	 * newest first :-0
6226 	 */
6227 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6228 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6229 	    (struct sctp_inpcb *)NULL,
6230 	    (struct sctp_tcb *)NULL,
6231 	    (struct sctp_nets *)NULL);
6232 	SCTP_WQ_ADDR_UNLOCK();
6233 	return (0);
6234 }
6235 
6236 
6237 int
6238 sctp_soreceive(struct socket *so,
6239     struct sockaddr **psa,
6240     struct uio *uio,
6241     struct mbuf **mp0,
6242     struct mbuf **controlp,
6243     int *flagsp)
6244 {
6245 	int error, fromlen;
6246 	uint8_t sockbuf[256];
6247 	struct sockaddr *from;
6248 	struct sctp_extrcvinfo sinfo;
6249 	int filling_sinfo = 1;
6250 	int flags;
6251 	struct sctp_inpcb *inp;
6252 
6253 	inp = (struct sctp_inpcb *)so->so_pcb;
6254 	/* pickup the assoc we are reading from */
6255 	if (inp == NULL) {
6256 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6257 		return (EINVAL);
6258 	}
6259 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6260 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6261 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6262 	    (controlp == NULL)) {
6263 		/* user does not want the sndrcv ctl */
6264 		filling_sinfo = 0;
6265 	}
6266 	if (psa) {
6267 		from = (struct sockaddr *)sockbuf;
6268 		fromlen = sizeof(sockbuf);
6269 		from->sa_len = 0;
6270 	} else {
6271 		from = NULL;
6272 		fromlen = 0;
6273 	}
6274 
6275 	if (filling_sinfo) {
6276 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6277 	}
6278 	if (flagsp != NULL) {
6279 		flags = *flagsp;
6280 	} else {
6281 		flags = 0;
6282 	}
6283 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags,
6284 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6285 	if (flagsp != NULL) {
6286 		*flagsp = flags;
6287 	}
6288 	if (controlp != NULL) {
6289 		/* copy back the sinfo in a CMSG format */
6290 		if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) {
6291 			*controlp = sctp_build_ctl_nchunk(inp,
6292 			    (struct sctp_sndrcvinfo *)&sinfo);
6293 		} else {
6294 			*controlp = NULL;
6295 		}
6296 	}
6297 	if (psa) {
6298 		/* copy back the address info */
6299 		if (from && from->sa_len) {
6300 			*psa = sodupsockaddr(from, M_NOWAIT);
6301 		} else {
6302 			*psa = NULL;
6303 		}
6304 	}
6305 	return (error);
6306 }
6307 
6308 
6309 
6310 
6311 
/*
 * Add "totaddr" remote addresses, packed back to back in "addr", to the
 * association "stcb" as confirmed remote addresses.  Each IPv4 address is
 * rejected if it is the wildcard, broadcast, or a multicast address; each
 * IPv6 address if it is unspecified or multicast.  On any failure the
 * association is freed, *error is set (EINVAL or ENOBUFS) and the walk
 * stops.  Returns the number of addresses successfully added.
 *
 * NOTE(review): after sctp_free_assoc() the stcb is gone; callers must
 * check *error before touching it again.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;	/* bytes to advance to the next sockaddr */
#ifdef INET
	struct sockaddr_in *sin;
#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;
#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* Reject wildcard, broadcast and multicast peers. */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* Reject unspecified and multicast peers. */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
			    SCTP_DONOT_SETSCOPE,
			    SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * NOTE(review): for an unknown family, incr keeps
			 * its previous value (0 on the first iteration), so
			 * the same or a mis-advanced sockaddr is re-examined
			 * on the next pass — the entry is silently skipped
			 * rather than treated as an error.
			 */
			break;
		}
		/* Addresses are packed contiguously; step to the next one. */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6392 
6393 struct sctp_tcb *
6394 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6395     unsigned int *totaddr,
6396     unsigned int *num_v4, unsigned int *num_v6, int *error,
6397     unsigned int limit, int *bad_addr)
6398 {
6399 	struct sockaddr *sa;
6400 	struct sctp_tcb *stcb = NULL;
6401 	unsigned int incr, at, i;
6402 
6403 	at = 0;
6404 	sa = addr;
6405 	*error = *num_v6 = *num_v4 = 0;
6406 	/* account and validate addresses */
6407 	for (i = 0; i < *totaddr; i++) {
6408 		switch (sa->sa_family) {
6409 #ifdef INET
6410 		case AF_INET:
6411 			incr = (unsigned int)sizeof(struct sockaddr_in);
6412 			if (sa->sa_len != incr) {
6413 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6414 				*error = EINVAL;
6415 				*bad_addr = 1;
6416 				return (NULL);
6417 			}
6418 			(*num_v4) += 1;
6419 			break;
6420 #endif
6421 #ifdef INET6
6422 		case AF_INET6:
6423 			{
6424 				struct sockaddr_in6 *sin6;
6425 
6426 				sin6 = (struct sockaddr_in6 *)sa;
6427 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6428 					/* Must be non-mapped for connectx */
6429 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6430 					*error = EINVAL;
6431 					*bad_addr = 1;
6432 					return (NULL);
6433 				}
6434 				incr = (unsigned int)sizeof(struct sockaddr_in6);
6435 				if (sa->sa_len != incr) {
6436 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6437 					*error = EINVAL;
6438 					*bad_addr = 1;
6439 					return (NULL);
6440 				}
6441 				(*num_v6) += 1;
6442 				break;
6443 			}
6444 #endif
6445 		default:
6446 			*totaddr = i;
6447 			incr = 0;
6448 			/* we are done */
6449 			break;
6450 		}
6451 		if (i == *totaddr) {
6452 			break;
6453 		}
6454 		SCTP_INP_INCR_REF(inp);
6455 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6456 		if (stcb != NULL) {
6457 			/* Already have or am bring up an association */
6458 			return (stcb);
6459 		} else {
6460 			SCTP_INP_DECR_REF(inp);
6461 		}
6462 		if ((at + incr) > limit) {
6463 			*totaddr = i;
6464 			break;
6465 		}
6466 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6467 	}
6468 	return ((struct sctp_tcb *)NULL);
6469 }
6470 
6471 /*
6472  * sctp_bindx(ADD) for one address.
6473  * assumes all arguments are valid/checked by caller.
6474  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	/*
	 * Bind one additional address to the endpoint (sctp_bindx ADD).
	 * Validates the family/length against the socket's address family
	 * settings, converts a v4-mapped v6 address to plain v4 where
	 * permitted, and then either performs the initial bind (if the
	 * endpoint is still unbound) or adds the address via
	 * sctp_addr_mgmt_ep_sa().  Failures are reported through *error;
	 * nothing is returned.
	 */
	struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;	/* scratch space for v4-mapped conversion */
#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;

#endif
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Bind the embedded v4 address instead. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* Endpoint not yet bound: this becomes the initial bind. */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		/*
		 * NOTE(review): a v6 address is also inspected through a
		 * sockaddr_in pointer here; only sin_port is read/written,
		 * which shares its offset with sin6_port.
		 */
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* Is some other endpoint already bound to this address? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* Free: clear the port and add the address. */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6600 
6601 /*
6602  * sctp_bindx(DELETE) for one address.
6603  * assumes all arguments are valid/checked by caller.
6604  */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	/*
	 * Unbind one address from the endpoint (sctp_bindx DELETE).
	 * Mirrors sctp_bindx_add_address(): validates the family/length,
	 * converts a permitted v4-mapped v6 address to plain v4, then
	 * removes the address via sctp_addr_mgmt_ep_sa().  Failures are
	 * reported through *error.
	 */
	struct sockaddr *addr_touse;
#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;	/* scratch space for v4-mapped conversion */
#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;
#endif

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Operate on the embedded v4 address instead. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6687 
6688 /*
6689  * returns the valid local address count for an assoc, taking into account
6690  * all scoping rules
6691  */
/*
 * returns the valid local address count for an assoc, taking into account
 * all scoping rules
 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope;
#if defined(INET)
	int ipv4_local_scope, ipv4_addr_legal;
#endif
#if defined (INET6)
	int local_scope, site_scope, ipv6_addr_legal;
#endif
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}

	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			/* Skip loopback interfaces unless in scope. */
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				/* Addresses restricted for this assoc don't count. */
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = &sctp_ifa->address.sin;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						/* Skip addresses outside our jail/prison. */
						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin->sin_addr) != 0) {
							continue;
						}
						/* Skip RFC1918 addrs unless in scope. */
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = &sctp_ifa->address.sin6;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						/* Skip addresses outside our jail/prison. */
						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin6->sin6_addr) != 0) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link local address */
									continue;
							}
						}
						/* Skip site-local addrs unless in scope. */
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case: count the endpoint's explicit
		 * address list, minus any restricted for this assoc.
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6830 
6831 #if defined(SCTP_LOCAL_TRACE_BUF)
6832 
/*
 * Append one entry (six parameter words plus a timestamp) to the global
 * circular SCTP trace log.  Lock free: a slot is reserved with a CAS loop
 * on the shared index, so concurrent callers each get a distinct slot,
 * though the entry fields themselves are filled in without further
 * synchronization.  "str" is accepted for API compatibility but unused.
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/*
	 * Reserve slot "saveindex" by advancing the shared index with
	 * compare-and-set; retry until no other CPU raced us.  The index
	 * wraps from SCTP_MAX_LOGGING_SIZE back to 1, not 0.
	 */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* A reserved index at/above the max maps onto slot 0. */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6858 
6859 #endif
/*
 * Input hook for the SCTP-over-UDP tunneling socket(s) (RFC 6951 style
 * encapsulation). 'm' holds the packet starting at the IP header and
 * 'off' is the offset of the UDP header within it. Strip the
 * encapsulating UDP header and hand the packet to the address-family
 * specific SCTP input routine, passing along the peer's UDP source
 * port so replies can be tunneled back. Consumes 'm' in all cases.
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
    const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
{
	struct ip *iph;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port (kept in network byte order). */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed; m_pullup has freed the chain. */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain: append sp after the IP header. */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	/*
	 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP
	 * checksum and it was valid. Since CSUM_DATA_VALID ==
	 * CSUM_SCTP_VALID this would imply that the HW also verified the
	 * SCTP checksum. Therefore, clear the bit.
	 */
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
	    m->m_pkthdr.len,
	    if_name(m->m_pkthdr.rcvif),
	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/* Shrink ip_len to account for the removed UDP header. */
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		/* Shrink ip6_plen likewise. */
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}
6942 
6943 #ifdef INET
/*
 * ICMP error handler for the IPv4 SCTP-over-UDP tunneling socket.
 * 'vip' points at the copy of the inner (offending) IP header quoted in
 * the ICMP message. Verify that the error really refers to one of our
 * tunneled SCTP packets -- UDP ports and SCTP verification tag must
 * match -- before reporting it to the association via sctp_notify().
 */
static void
sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
{
	struct ip *outer_ip, *inner_ip;
	struct sctphdr *sh;
	struct icmp *icmp;
	struct udphdr *udp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_init_chunk *ch;
	struct sockaddr_in src, dst;
	uint8_t type, code;

	inner_ip = (struct ip *)vip;
	/*
	 * Recover the enclosing ICMP header and the outer IP header by
	 * stepping backwards from the quoted inner IP header.
	 */
	icmp = (struct icmp *)((caddr_t)inner_ip -
	    (sizeof(struct icmp) - sizeof(struct ip)));
	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
	/*
	 * The ICMP message must quote at least the inner IP header, the
	 * UDP header, and the first 8 bytes of the SCTP common header
	 * (ports plus verification tag).
	 */
	if (ntohs(outer_ip->ip_len) <
	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
		return;
	}
	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
	sh = (struct sctphdr *)(udp + 1);
	memset(&src, 0, sizeof(struct sockaddr_in));
	src.sin_family = AF_INET;
	src.sin_len = sizeof(struct sockaddr_in);
	src.sin_port = sh->src_port;
	src.sin_addr = inner_ip->ip_src;
	memset(&dst, 0, sizeof(struct sockaddr_in));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(struct sockaddr_in);
	dst.sin_port = sh->dest_port;
	dst.sin_addr = inner_ip->ip_dst;
	/*
	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
	 * holds our local endpoint address. Thus we reverse the dst and the
	 * src in the lookup.
	 */
	inp = NULL;
	net = NULL;
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	    (struct sockaddr *)&src,
	    &inp, &net, 1,
	    SCTP_DEFAULT_VRFID);
	/* On success the stcb is returned locked. */
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/* Check the UDP port numbers */
		if ((udp->uh_dport != net->port) ||
		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh->v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider packets
			 * reflecting the verification tag.
			 */
			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			/*
			 * Zero vtag: acceptable only if the quoted packet
			 * is long enough to contain an INIT chunk header
			 * plus initiate tag, and the tag matches ours.
			 */
			if (ntohs(outer_ip->ip_len) >=
			    sizeof(struct ip) +
			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
				/*
				 * In this case we can check if we got an
				 * INIT chunk and if the initiate tag
				 * matches.
				 */
				ch = (struct sctp_init_chunk *)(sh + 1);
				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = icmp->icmp_type;
		code = icmp->icmp_code;
		/*
		 * Port unreachable on the encapsulating UDP port means
		 * the peer cannot handle SCTP over UDP; report it as the
		 * protocol unreachable a plain SCTP packet would see.
		 */
		if ((type == ICMP_UNREACH) &&
		    (code == ICMP_UNREACH_PORT)) {
			code = ICMP_UNREACH_PROTOCOL;
		}
		sctp_notify(inp, stcb, net, type, code,
		    ntohs(inner_ip->ip_len),
		    (uint32_t)ntohs(icmp->icmp_nextmtu));
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
	return;
}
7051 #endif
7052 
7053 #ifdef INET6
/*
 * ICMPv6 error handler for the IPv6 SCTP-over-UDP tunneling socket.
 * 'd' is a struct ip6ctlparam describing the ICMPv6 message and the
 * quoted offending packet. Verify that the error refers to one of our
 * tunneled SCTP packets -- UDP ports and SCTP verification tag must
 * match -- before reporting it to the association via sctp6_notify().
 */
static void
sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
{
	struct ip6ctlparam *ip6cp;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctphdr sh;
	struct udphdr udp;
	struct sockaddr_in6 src, dst;
	uint8_t type, code;

	ip6cp = (struct ip6ctlparam *)d;
	/*
	 * XXX: We assume that when IPV6 is non NULL, M and OFF are valid.
	 */
	if (ip6cp->ip6c_m == NULL) {
		return;
	}
	/*
	 * Check if we can safely examine the ports and the verification tag
	 * of the SCTP common header.
	 */
	if (ip6cp->ip6c_m->m_pkthdr.len <
	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
		return;
	}
	/* Copy out the UDP header. */
	memset(&udp, 0, sizeof(struct udphdr));
	m_copydata(ip6cp->ip6c_m,
	    ip6cp->ip6c_off,
	    sizeof(struct udphdr),
	    (caddr_t)&udp);
	/*
	 * Copy out the port numbers and the verification tag -- only the
	 * first 8 bytes of the SCTP common header; sh.checksum stays 0.
	 */
	memset(&sh, 0, sizeof(struct sctphdr));
	m_copydata(ip6cp->ip6c_m,
	    ip6cp->ip6c_off + sizeof(struct udphdr),
	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
	    (caddr_t)&sh);
	/* Build src/dst from the quoted inner IPv6 header. */
	memset(&src, 0, sizeof(struct sockaddr_in6));
	src.sin6_family = AF_INET6;
	src.sin6_len = sizeof(struct sockaddr_in6);
	src.sin6_port = sh.src_port;
	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
	memset(&dst, 0, sizeof(struct sockaddr_in6));
	dst.sin6_family = AF_INET6;
	dst.sin6_len = sizeof(struct sockaddr_in6);
	dst.sin6_port = sh.dest_port;
	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
		return;
	}
	/*
	 * 'dst' is the destination of the failed packet and 'src' our
	 * local endpoint, so reverse them in the association lookup.
	 */
	inp = NULL;
	net = NULL;
	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
	    (struct sockaddr *)&src,
	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
	/* On success the stcb is returned locked. */
	if ((stcb != NULL) &&
	    (net != NULL) &&
	    (inp != NULL)) {
		/* Check the UDP port numbers */
		if ((udp.uh_dport != net->port) ||
		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		/* Check the verification tag */
		if (ntohl(sh.v_tag) != 0) {
			/*
			 * This must be the verification tag used for
			 * sending out packets. We don't consider packets
			 * reflecting the verification tag.
			 */
			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		} else {
			/*
			 * Zero vtag: acceptable only if enough of an INIT
			 * chunk was quoted and its initiate tag matches
			 * ours.
			 */
			if (ip6cp->ip6c_m->m_pkthdr.len >=
			    ip6cp->ip6c_off + sizeof(struct udphdr) +
			    sizeof(struct sctphdr) +
			    sizeof(struct sctp_chunkhdr) +
			    offsetof(struct sctp_init, a_rwnd)) {
				/*
				 * In this case we can check if we got an
				 * INIT chunk and if the initiate tag
				 * matches.
				 */
				uint32_t initiate_tag;
				uint8_t chunk_type;

				m_copydata(ip6cp->ip6c_m,
				    ip6cp->ip6c_off +
				    sizeof(struct udphdr) +
				    sizeof(struct sctphdr),
				    sizeof(uint8_t),
				    (caddr_t)&chunk_type);
				m_copydata(ip6cp->ip6c_m,
				    ip6cp->ip6c_off +
				    sizeof(struct udphdr) +
				    sizeof(struct sctphdr) +
				    sizeof(struct sctp_chunkhdr),
				    sizeof(uint32_t),
				    (caddr_t)&initiate_tag);
				if ((chunk_type != SCTP_INITIATION) ||
				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
					SCTP_TCB_UNLOCK(stcb);
					return;
				}
			} else {
				SCTP_TCB_UNLOCK(stcb);
				return;
			}
		}
		type = ip6cp->ip6c_icmp6->icmp6_type;
		code = ip6cp->ip6c_icmp6->icmp6_code;
		/*
		 * No-port on the encapsulating UDP port means the peer
		 * cannot handle SCTP over UDP; report it as the parameter
		 * problem a plain SCTP packet would see.
		 */
		if ((type == ICMP6_DST_UNREACH) &&
		    (code == ICMP6_DST_UNREACH_NOPORT)) {
			type = ICMP6_PARAM_PROB;
			code = ICMP6_PARAMPROB_NEXTHEADER;
		}
		sctp6_notify(inp, stcb, net, type, code,
		    ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
	} else {
		if ((stcb == NULL) && (inp != NULL)) {
			/* reduce inp's ref-count */
			SCTP_INP_WLOCK(inp);
			SCTP_INP_DECR_REF(inp);
			SCTP_INP_WUNLOCK(inp);
		}
		if (stcb) {
			SCTP_TCB_UNLOCK(stcb);
		}
	}
}
7192 #endif
7193 
/*
 * Tear down SCTP-over-UDP tunneling: close the per-address-family
 * tunneling sockets (if open) and clear the global pointers.
 *
 * The sysctl caller must hold sctp_sysctl_info_lock() for writing.
 */
void
sctp_over_udp_stop(void)
{
#ifdef INET
	{
		struct socket *so4;

		so4 = SCTP_BASE_INFO(udp4_tun_socket);
		if (so4 != NULL) {
			soclose(so4);
			SCTP_BASE_INFO(udp4_tun_socket) = NULL;
		}
	}
#endif
#ifdef INET6
	{
		struct socket *so6;

		so6 = SCTP_BASE_INFO(udp6_tun_socket);
		if (so6 != NULL) {
			soclose(so6);
			SCTP_BASE_INFO(udp6_tun_socket) = NULL;
		}
	}
#endif
}
7214 
/*
 * Start SCTP-over-UDP tunneling: for each enabled address family,
 * create a kernel UDP socket, install the tunneling packet/ICMP hooks,
 * and bind the socket to the configured tunneling port. On any failure
 * every socket created so far is closed again via sctp_over_udp_stop().
 * Returns 0 on success or an errno value.
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (ntohs(port) == 0) {
		/* Must have a port set (byte order irrelevant for zero) */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet,
	    sctp_recv_icmp6_tunneled_packet,
	    NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}
7301 
7302 /*
7303  * sctp_min_mtu ()returns the minimum of all non-zero arguments.
7304  * If all arguments are zero, zero is returned.
7305  */
uint32_t
sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
{
	uint32_t candidates[3];
	uint32_t smallest;
	int i;

	candidates[0] = mtu1;
	candidates[1] = mtu2;
	candidates[2] = mtu3;
	/* Scan for the smallest non-zero candidate; 0 means "unset". */
	smallest = 0;
	for (i = 0; i < 3; i++) {
		if (candidates[i] == 0) {
			continue;
		}
		if ((smallest == 0) || (candidates[i] < smallest)) {
			smallest = candidates[i];
		}
	}
	return (smallest);
}
7335 
7336 void
7337 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
7338 {
7339 	struct in_conninfo inc;
7340 
7341 	memset(&inc, 0, sizeof(struct in_conninfo));
7342 	inc.inc_fibnum = fibnum;
7343 	switch (addr->sa.sa_family) {
7344 #ifdef INET
7345 	case AF_INET:
7346 		inc.inc_faddr = addr->sin.sin_addr;
7347 		break;
7348 #endif
7349 #ifdef INET6
7350 	case AF_INET6:
7351 		inc.inc_flags |= INC_ISIPV6;
7352 		inc.inc6_faddr = addr->sin6.sin6_addr;
7353 		break;
7354 #endif
7355 	default:
7356 		return;
7357 	}
7358 	tcp_hc_updatemtu(&inc, (u_long)mtu);
7359 }
7360 
7361 uint32_t
7362 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
7363 {
7364 	struct in_conninfo inc;
7365 
7366 	memset(&inc, 0, sizeof(struct in_conninfo));
7367 	inc.inc_fibnum = fibnum;
7368 	switch (addr->sa.sa_family) {
7369 #ifdef INET
7370 	case AF_INET:
7371 		inc.inc_faddr = addr->sin.sin_addr;
7372 		break;
7373 #endif
7374 #ifdef INET6
7375 	case AF_INET6:
7376 		inc.inc_flags |= INC_ISIPV6;
7377 		inc.inc6_faddr = addr->sin6.sin6_addr;
7378 		break;
7379 #endif
7380 	default:
7381 		return (0);
7382 	}
7383 	return ((uint32_t)tcp_hc_getmtu(&inc));
7384 }
7385