xref: /freebsd/sys/netinet/sctputil.c (revision 5bf5ca772c6de2d53344a78cf461447cc322ccea)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #include <netinet6/sctp6_var.h>
45 #endif
46 #include <netinet/sctp_header.h>
47 #include <netinet/sctp_output.h>
48 #include <netinet/sctp_uio.h>
49 #include <netinet/sctp_timer.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_auth.h>
52 #include <netinet/sctp_asconf.h>
53 #include <netinet/sctp_bsd_addr.h>
54 #if defined(INET6) || defined(INET)
55 #include <netinet/tcp_var.h>
56 #endif
57 #include <netinet/udp.h>
58 #include <netinet/udp_var.h>
59 #include <sys/proc.h>
60 #ifdef INET6
61 #include <netinet/icmp6.h>
62 #endif
63 
64 
65 #ifndef KTR_SCTP
66 #define KTR_SCTP KTR_SUBSYS
67 #endif
68 
69 extern const struct sctp_cc_functions sctp_cc_functions[];
70 extern const struct sctp_ss_functions sctp_ss_functions[];
71 
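/*
 * Most of the sctp_sblog()/sctp_log_*() helpers below work the same way:
 * they pack their arguments into the appropriate member of the union in a
 * struct sctp_cwnd_log and then emit the four 32-bit words of that union
 * (misc.log1 .. misc.log4) through the kernel trace facility under
 * KTR_SCTP, tagged with an event type and a "from" location code.
 */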
72 void
73 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
74 {
75 	struct sctp_cwnd_log sctp_clog;
76 
77 	sctp_clog.x.sb.stcb = stcb;
78 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
79 	if (stcb)
80 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
81 	else
82 		sctp_clog.x.sb.stcb_sbcc = 0;
83 	sctp_clog.x.sb.incr = incr;
84 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
85 	    SCTP_LOG_EVENT_SB,
86 	    from,
87 	    sctp_clog.x.misc.log1,
88 	    sctp_clog.x.misc.log2,
89 	    sctp_clog.x.misc.log3,
90 	    sctp_clog.x.misc.log4);
91 }
92 
93 void
94 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
95 {
96 	struct sctp_cwnd_log sctp_clog;
97 
98 	sctp_clog.x.close.inp = (void *)inp;
99 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
100 	if (stcb) {
101 		sctp_clog.x.close.stcb = (void *)stcb;
102 		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
103 	} else {
104 		sctp_clog.x.close.stcb = 0;
105 		sctp_clog.x.close.state = 0;
106 	}
107 	sctp_clog.x.close.loc = loc;
108 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
109 	    SCTP_LOG_EVENT_CLOSE,
110 	    0,
111 	    sctp_clog.x.misc.log1,
112 	    sctp_clog.x.misc.log2,
113 	    sctp_clog.x.misc.log3,
114 	    sctp_clog.x.misc.log4);
115 }
116 
117 void
118 rto_logging(struct sctp_nets *net, int from)
119 {
120 	struct sctp_cwnd_log sctp_clog;
121 
122 	memset(&sctp_clog, 0, sizeof(sctp_clog));
123 	sctp_clog.x.rto.net = (void *)net;
124 	sctp_clog.x.rto.rtt = net->rtt / 1000;
125 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
126 	    SCTP_LOG_EVENT_RTT,
127 	    from,
128 	    sctp_clog.x.misc.log1,
129 	    sctp_clog.x.misc.log2,
130 	    sctp_clog.x.misc.log3,
131 	    sctp_clog.x.misc.log4);
132 }
133 
134 void
135 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
136 {
137 	struct sctp_cwnd_log sctp_clog;
138 
139 	sctp_clog.x.strlog.stcb = stcb;
140 	sctp_clog.x.strlog.n_tsn = tsn;
141 	sctp_clog.x.strlog.n_sseq = sseq;
142 	sctp_clog.x.strlog.e_tsn = 0;
143 	sctp_clog.x.strlog.e_sseq = 0;
144 	sctp_clog.x.strlog.strm = stream;
145 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
146 	    SCTP_LOG_EVENT_STRM,
147 	    from,
148 	    sctp_clog.x.misc.log1,
149 	    sctp_clog.x.misc.log2,
150 	    sctp_clog.x.misc.log3,
151 	    sctp_clog.x.misc.log4);
152 }
153 
154 void
155 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
156 {
157 	struct sctp_cwnd_log sctp_clog;
158 
159 	sctp_clog.x.nagle.stcb = (void *)stcb;
160 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
161 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
162 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
163 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
164 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
165 	    SCTP_LOG_EVENT_NAGLE,
166 	    action,
167 	    sctp_clog.x.misc.log1,
168 	    sctp_clog.x.misc.log2,
169 	    sctp_clog.x.misc.log3,
170 	    sctp_clog.x.misc.log4);
171 }
172 
173 void
174 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
175 {
176 	struct sctp_cwnd_log sctp_clog;
177 
178 	sctp_clog.x.sack.cumack = cumack;
179 	sctp_clog.x.sack.oldcumack = old_cumack;
180 	sctp_clog.x.sack.tsn = tsn;
181 	sctp_clog.x.sack.numGaps = gaps;
182 	sctp_clog.x.sack.numDups = dups;
183 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
184 	    SCTP_LOG_EVENT_SACK,
185 	    from,
186 	    sctp_clog.x.misc.log1,
187 	    sctp_clog.x.misc.log2,
188 	    sctp_clog.x.misc.log3,
189 	    sctp_clog.x.misc.log4);
190 }
191 
192 void
193 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
194 {
195 	struct sctp_cwnd_log sctp_clog;
196 
197 	memset(&sctp_clog, 0, sizeof(sctp_clog));
198 	sctp_clog.x.map.base = map;
199 	sctp_clog.x.map.cum = cum;
200 	sctp_clog.x.map.high = high;
201 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
202 	    SCTP_LOG_EVENT_MAP,
203 	    from,
204 	    sctp_clog.x.misc.log1,
205 	    sctp_clog.x.misc.log2,
206 	    sctp_clog.x.misc.log3,
207 	    sctp_clog.x.misc.log4);
208 }
209 
210 void
211 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
212 {
213 	struct sctp_cwnd_log sctp_clog;
214 
215 	memset(&sctp_clog, 0, sizeof(sctp_clog));
216 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
217 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
218 	sctp_clog.x.fr.tsn = tsn;
219 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
220 	    SCTP_LOG_EVENT_FR,
221 	    from,
222 	    sctp_clog.x.misc.log1,
223 	    sctp_clog.x.misc.log2,
224 	    sctp_clog.x.misc.log3,
225 	    sctp_clog.x.misc.log4);
226 }
227 
228 #ifdef SCTP_MBUF_LOGGING
229 void
230 sctp_log_mb(struct mbuf *m, int from)
231 {
232 	struct sctp_cwnd_log sctp_clog;
233 
234 	sctp_clog.x.mb.mp = m;
235 	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
236 	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
237 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
238 	if (SCTP_BUF_IS_EXTENDED(m)) {
239 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
240 		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
241 	} else {
242 		sctp_clog.x.mb.ext = 0;
243 		sctp_clog.x.mb.refcnt = 0;
244 	}
245 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
246 	    SCTP_LOG_EVENT_MBUF,
247 	    from,
248 	    sctp_clog.x.misc.log1,
249 	    sctp_clog.x.misc.log2,
250 	    sctp_clog.x.misc.log3,
251 	    sctp_clog.x.misc.log4);
252 }
253 
254 void
255 sctp_log_mbc(struct mbuf *m, int from)
256 {
257 	struct mbuf *mat;
258 
259 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
260 		sctp_log_mb(mat, from);
261 	}
262 }
263 #endif
264 
265 void
266 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
267 {
268 	struct sctp_cwnd_log sctp_clog;
269 
270 	if (control == NULL) {
271 		SCTP_PRINTF("Gak log of NULL?\n");
272 		return;
273 	}
274 	sctp_clog.x.strlog.stcb = control->stcb;
275 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
276 	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
277 	sctp_clog.x.strlog.strm = control->sinfo_stream;
278 	if (poschk != NULL) {
279 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
280 		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
281 	} else {
282 		sctp_clog.x.strlog.e_tsn = 0;
283 		sctp_clog.x.strlog.e_sseq = 0;
284 	}
285 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
286 	    SCTP_LOG_EVENT_STRM,
287 	    from,
288 	    sctp_clog.x.misc.log1,
289 	    sctp_clog.x.misc.log2,
290 	    sctp_clog.x.misc.log3,
291 	    sctp_clog.x.misc.log4);
292 }
293 
294 void
295 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
296 {
297 	struct sctp_cwnd_log sctp_clog;
298 
299 	sctp_clog.x.cwnd.net = net;
300 	if (stcb->asoc.send_queue_cnt > 255)
301 		sctp_clog.x.cwnd.cnt_in_send = 255;
302 	else
303 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
304 	if (stcb->asoc.stream_queue_cnt > 255)
305 		sctp_clog.x.cwnd.cnt_in_str = 255;
306 	else
307 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
308 
309 	if (net) {
310 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
311 		sctp_clog.x.cwnd.inflight = net->flight_size;
312 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
313 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
314 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
315 	}
316 	if (SCTP_CWNDLOG_PRESEND == from) {
317 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
318 	}
319 	sctp_clog.x.cwnd.cwnd_augment = augment;
320 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
321 	    SCTP_LOG_EVENT_CWND,
322 	    from,
323 	    sctp_clog.x.misc.log1,
324 	    sctp_clog.x.misc.log2,
325 	    sctp_clog.x.misc.log3,
326 	    sctp_clog.x.misc.log4);
327 }
328 
329 void
330 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
331 {
332 	struct sctp_cwnd_log sctp_clog;
333 
334 	memset(&sctp_clog, 0, sizeof(sctp_clog));
335 	if (inp) {
336 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
337 
338 	} else {
339 		sctp_clog.x.lock.sock = (void *)NULL;
340 	}
341 	sctp_clog.x.lock.inp = (void *)inp;
342 	if (stcb) {
343 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
344 	} else {
345 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
346 	}
347 	if (inp) {
348 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
349 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
350 	} else {
351 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
352 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
353 	}
354 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
355 	if (inp && (inp->sctp_socket)) {
356 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
357 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
358 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
359 	} else {
360 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
361 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
362 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
363 	}
364 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
365 	    SCTP_LOG_LOCK_EVENT,
366 	    from,
367 	    sctp_clog.x.misc.log1,
368 	    sctp_clog.x.misc.log2,
369 	    sctp_clog.x.misc.log3,
370 	    sctp_clog.x.misc.log4);
371 }
372 
373 void
374 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
375 {
376 	struct sctp_cwnd_log sctp_clog;
377 
378 	memset(&sctp_clog, 0, sizeof(sctp_clog));
379 	sctp_clog.x.cwnd.net = net;
380 	sctp_clog.x.cwnd.cwnd_new_value = error;
381 	sctp_clog.x.cwnd.inflight = net->flight_size;
382 	sctp_clog.x.cwnd.cwnd_augment = burst;
383 	if (stcb->asoc.send_queue_cnt > 255)
384 		sctp_clog.x.cwnd.cnt_in_send = 255;
385 	else
386 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
387 	if (stcb->asoc.stream_queue_cnt > 255)
388 		sctp_clog.x.cwnd.cnt_in_str = 255;
389 	else
390 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
391 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
392 	    SCTP_LOG_EVENT_MAXBURST,
393 	    from,
394 	    sctp_clog.x.misc.log1,
395 	    sctp_clog.x.misc.log2,
396 	    sctp_clog.x.misc.log3,
397 	    sctp_clog.x.misc.log4);
398 }
399 
400 void
401 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
402 {
403 	struct sctp_cwnd_log sctp_clog;
404 
405 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
406 	sctp_clog.x.rwnd.send_size = snd_size;
407 	sctp_clog.x.rwnd.overhead = overhead;
408 	sctp_clog.x.rwnd.new_rwnd = 0;
409 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
410 	    SCTP_LOG_EVENT_RWND,
411 	    from,
412 	    sctp_clog.x.misc.log1,
413 	    sctp_clog.x.misc.log2,
414 	    sctp_clog.x.misc.log3,
415 	    sctp_clog.x.misc.log4);
416 }
417 
418 void
419 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
420 {
421 	struct sctp_cwnd_log sctp_clog;
422 
423 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
424 	sctp_clog.x.rwnd.send_size = flight_size;
425 	sctp_clog.x.rwnd.overhead = overhead;
426 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
427 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
428 	    SCTP_LOG_EVENT_RWND,
429 	    from,
430 	    sctp_clog.x.misc.log1,
431 	    sctp_clog.x.misc.log2,
432 	    sctp_clog.x.misc.log3,
433 	    sctp_clog.x.misc.log4);
434 }
435 
436 #ifdef SCTP_MBCNT_LOGGING
437 static void
438 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
439 {
440 	struct sctp_cwnd_log sctp_clog;
441 
442 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
443 	sctp_clog.x.mbcnt.size_change = book;
444 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
445 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
446 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
447 	    SCTP_LOG_EVENT_MBCNT,
448 	    from,
449 	    sctp_clog.x.misc.log1,
450 	    sctp_clog.x.misc.log2,
451 	    sctp_clog.x.misc.log3,
452 	    sctp_clog.x.misc.log4);
453 }
454 #endif
455 
456 void
457 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
458 {
459 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
460 	    SCTP_LOG_MISC_EVENT,
461 	    from,
462 	    a, b, c, d);
463 }
464 
465 void
466 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
467 {
468 	struct sctp_cwnd_log sctp_clog;
469 
470 	sctp_clog.x.wake.stcb = (void *)stcb;
471 	sctp_clog.x.wake.wake_cnt = wake_cnt;
472 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
473 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
474 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
475 
476 	if (stcb->asoc.stream_queue_cnt < 0xff)
477 		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
478 	else
479 		sctp_clog.x.wake.stream_qcnt = 0xff;
480 
481 	if (stcb->asoc.chunks_on_out_queue < 0xff)
482 		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
483 	else
484 		sctp_clog.x.wake.chunks_on_oque = 0xff;
485 
486 	sctp_clog.x.wake.sctpflags = 0;
487 	/* set in the deferred mode stuff */
488 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
489 		sctp_clog.x.wake.sctpflags |= 1;
490 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
491 		sctp_clog.x.wake.sctpflags |= 2;
492 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
493 		sctp_clog.x.wake.sctpflags |= 4;
494 	/* what about the sb */
495 	if (stcb->sctp_socket) {
496 		struct socket *so = stcb->sctp_socket;
497 
498 		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
499 	} else {
500 		sctp_clog.x.wake.sbflags = 0xff;
501 	}
502 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
503 	    SCTP_LOG_EVENT_WAKE,
504 	    from,
505 	    sctp_clog.x.misc.log1,
506 	    sctp_clog.x.misc.log2,
507 	    sctp_clog.x.misc.log3,
508 	    sctp_clog.x.misc.log4);
509 }
510 
511 void
512 sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
513 {
514 	struct sctp_cwnd_log sctp_clog;
515 
516 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
517 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
518 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
519 	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
520 	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
521 	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
522 	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
523 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
524 	    SCTP_LOG_EVENT_BLOCK,
525 	    from,
526 	    sctp_clog.x.misc.log1,
527 	    sctp_clog.x.misc.log2,
528 	    sctp_clog.x.misc.log3,
529 	    sctp_clog.x.misc.log4);
530 }
531 
532 int
533 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
534 {
535 	/* May need to fix this if ktrdump does not work */
536 	return (0);
537 }
538 
539 #ifdef SCTP_AUDITING_ENABLED
540 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
541 static int sctp_audit_indx = 0;
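/*
 * The audit trail is a small ring buffer of two-byte records: an event
 * byte and a detail byte are stored at sctp_audit_indx, which wraps when
 * it reaches SCTP_AUDIT_SIZE.  sctp_print_audit_report() dumps the buffer
 * starting with the oldest entry.
 */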
542 
543 static
544 void
545 sctp_print_audit_report(void)
546 {
547 	int i;
548 	int cnt;
549 
550 	cnt = 0;
551 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
552 		if ((sctp_audit_data[i][0] == 0xe0) &&
553 		    (sctp_audit_data[i][1] == 0x01)) {
554 			cnt = 0;
555 			SCTP_PRINTF("\n");
556 		} else if (sctp_audit_data[i][0] == 0xf0) {
557 			cnt = 0;
558 			SCTP_PRINTF("\n");
559 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
560 		    (sctp_audit_data[i][1] == 0x01)) {
561 			SCTP_PRINTF("\n");
562 			cnt = 0;
563 		}
564 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
565 		    (uint32_t)sctp_audit_data[i][1]);
566 		cnt++;
567 		if ((cnt % 14) == 0)
568 			SCTP_PRINTF("\n");
569 	}
570 	for (i = 0; i < sctp_audit_indx; i++) {
571 		if ((sctp_audit_data[i][0] == 0xe0) &&
572 		    (sctp_audit_data[i][1] == 0x01)) {
573 			cnt = 0;
574 			SCTP_PRINTF("\n");
575 		} else if (sctp_audit_data[i][0] == 0xf0) {
576 			cnt = 0;
577 			SCTP_PRINTF("\n");
578 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
579 		    (sctp_audit_data[i][1] == 0x01)) {
580 			SCTP_PRINTF("\n");
581 			cnt = 0;
582 		}
583 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
584 		    (uint32_t)sctp_audit_data[i][1]);
585 		cnt++;
586 		if ((cnt % 14) == 0)
587 			SCTP_PRINTF("\n");
588 	}
589 	SCTP_PRINTF("\n");
590 }
591 
592 void
593 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
594     struct sctp_nets *net)
595 {
596 	int resend_cnt, tot_out, rep, tot_book_cnt;
597 	struct sctp_nets *lnet;
598 	struct sctp_tmit_chunk *chk;
599 
600 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
601 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
602 	sctp_audit_indx++;
603 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
604 		sctp_audit_indx = 0;
605 	}
606 	if (inp == NULL) {
607 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
608 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
609 		sctp_audit_indx++;
610 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
611 			sctp_audit_indx = 0;
612 		}
613 		return;
614 	}
615 	if (stcb == NULL) {
616 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
617 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
618 		sctp_audit_indx++;
619 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
620 			sctp_audit_indx = 0;
621 		}
622 		return;
623 	}
624 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
625 	sctp_audit_data[sctp_audit_indx][1] =
626 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
627 	sctp_audit_indx++;
628 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
629 		sctp_audit_indx = 0;
630 	}
631 	rep = 0;
632 	tot_book_cnt = 0;
633 	resend_cnt = tot_out = 0;
634 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
635 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
636 			resend_cnt++;
637 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
638 			tot_out += chk->book_size;
639 			tot_book_cnt++;
640 		}
641 	}
642 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
643 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
644 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
645 		sctp_audit_indx++;
646 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
647 			sctp_audit_indx = 0;
648 		}
649 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
650 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
651 		rep = 1;
652 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
653 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
654 		sctp_audit_data[sctp_audit_indx][1] =
655 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
656 		sctp_audit_indx++;
657 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
658 			sctp_audit_indx = 0;
659 		}
660 	}
661 	if (tot_out != stcb->asoc.total_flight) {
662 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
663 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
664 		sctp_audit_indx++;
665 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
666 			sctp_audit_indx = 0;
667 		}
668 		rep = 1;
669 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
670 		    (int)stcb->asoc.total_flight);
671 		stcb->asoc.total_flight = tot_out;
672 	}
673 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
674 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
675 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
676 		sctp_audit_indx++;
677 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
678 			sctp_audit_indx = 0;
679 		}
680 		rep = 1;
681 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
682 
683 		stcb->asoc.total_flight_count = tot_book_cnt;
684 	}
685 	tot_out = 0;
686 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
687 		tot_out += lnet->flight_size;
688 	}
689 	if (tot_out != stcb->asoc.total_flight) {
690 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
691 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
692 		sctp_audit_indx++;
693 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
694 			sctp_audit_indx = 0;
695 		}
696 		rep = 1;
697 		SCTP_PRINTF("real flight:%d net total was %d\n",
698 		    stcb->asoc.total_flight, tot_out);
699 		/* now corrective action */
700 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
701 
702 			tot_out = 0;
703 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
704 				if ((chk->whoTo == lnet) &&
705 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
706 					tot_out += chk->book_size;
707 				}
708 			}
709 			if (lnet->flight_size != tot_out) {
710 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
711 				    (void *)lnet, lnet->flight_size,
712 				    tot_out);
713 				lnet->flight_size = tot_out;
714 			}
715 		}
716 	}
717 	if (rep) {
718 		sctp_print_audit_report();
719 	}
720 }
721 
722 void
723 sctp_audit_log(uint8_t ev, uint8_t fd)
724 {
725 
726 	sctp_audit_data[sctp_audit_indx][0] = ev;
727 	sctp_audit_data[sctp_audit_indx][1] = fd;
728 	sctp_audit_indx++;
729 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
730 		sctp_audit_indx = 0;
731 	}
732 }
733 
734 #endif
735 
736 /*
737  * sctp_stop_timers_for_shutdown() should be called
738  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
739  * state to make sure that all timers are stopped.
740  */
741 void
742 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
743 {
744 	struct sctp_association *asoc;
745 	struct sctp_nets *net;
746 
747 	asoc = &stcb->asoc;
748 
749 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
750 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
751 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
752 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
753 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
754 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
755 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
756 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
757 	}
758 }
759 
760 /*
761  * a list of sizes based on typical MTUs, used only if the next hop
762  * size is not returned.
763  */
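/*
 * The entries roughly follow the classic MTU plateau table from RFC 1191
 * (Path MTU Discovery), plus a few other common link MTUs.
 */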
764 static uint32_t sctp_mtu_sizes[] = {
765 	68,
766 	296,
767 	508,
768 	512,
769 	544,
770 	576,
771 	1006,
772 	1492,
773 	1500,
774 	1536,
775 	2002,
776 	2048,
777 	4352,
778 	4464,
779 	8166,
780 	17914,
781 	32000,
782 	65535
783 };
784 
785 /*
786  * Return the largest MTU smaller than val. If there is no
787  * entry, just return val.
788  */
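/*
 * For example, with the table above, sctp_get_prev_mtu(1500) returns 1492
 * and sctp_get_prev_mtu(1400) returns 1006.
 */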
789 uint32_t
790 sctp_get_prev_mtu(uint32_t val)
791 {
792 	uint32_t i;
793 
794 	if (val <= sctp_mtu_sizes[0]) {
795 		return (val);
796 	}
797 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
798 		if (val <= sctp_mtu_sizes[i]) {
799 			break;
800 		}
801 	}
802 	return (sctp_mtu_sizes[i - 1]);
803 }
804 
805 /*
806  * Return the smallest MTU larger than val. If there is no
807  * entry, just return val.
808  */
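/*
 * For example, with the table above, sctp_get_next_mtu(1500) returns 1536,
 * while sctp_get_next_mtu(65535) returns 65535 since no larger entry exists.
 */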
809 uint32_t
810 sctp_get_next_mtu(uint32_t val)
811 {
812 	/* select another MTU that is just bigger than this one */
813 	uint32_t i;
814 
815 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
816 		if (val < sctp_mtu_sizes[i]) {
817 			return (sctp_mtu_sizes[i]);
818 		}
819 	}
820 	return (val);
821 }
822 
823 void
824 sctp_fill_random_store(struct sctp_pcb *m)
825 {
826 	/*
827 	 * Here we use MD5/SHA-1 to hash our good random numbers together with
828 	 * our counter. The result becomes our new pool of good random numbers,
829 	 * and we then set up to hand these out. Note that we do no locking to
830 	 * protect this. That is OK, since if competing callers get in here we
831 	 * just stir more gobbledygook into the random store, which is what we
832 	 * want. There is a danger that two callers will use the same random
833 	 * numbers, but that's OK too since that is random as well :->
834 	 */
835 	m->store_at = 0;
836 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
837 	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
838 	    sizeof(m->random_counter), (uint8_t *)m->random_store);
839 	m->random_counter++;
840 }
841 
842 uint32_t
843 sctp_select_initial_TSN(struct sctp_pcb *inp)
844 {
845 	/*
846 	 * A true implementation should use a random selection process to get
847 	 * the initial stream sequence number, using RFC 1750 as a good
848 	 * guideline.
849 	 */
850 	uint32_t x, *xp;
851 	uint8_t *p;
852 	int store_at, new_store;
853 
854 	if (inp->initial_sequence_debug != 0) {
855 		uint32_t ret;
856 
857 		ret = inp->initial_sequence_debug;
858 		inp->initial_sequence_debug++;
859 		return (ret);
860 	}
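	/*
	 * Carve a 4-byte slot out of the random store without holding a
	 * lock: reserve the offset with a compare-and-swap, retrying on
	 * contention, and refill the store once the reservation wraps back
	 * to offset 0.
	 */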
861 retry:
862 	store_at = inp->store_at;
863 	new_store = store_at + sizeof(uint32_t);
864 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
865 		new_store = 0;
866 	}
867 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
868 		goto retry;
869 	}
870 	if (new_store == 0) {
871 		/* Refill the random store */
872 		sctp_fill_random_store(inp);
873 	}
874 	p = &inp->random_store[store_at];
875 	xp = (uint32_t *)p;
876 	x = *xp;
877 	return (x);
878 }
879 
880 uint32_t
881 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
882 {
883 	uint32_t x;
884 	struct timeval now;
885 
886 	if (check) {
887 		(void)SCTP_GETTIME_TIMEVAL(&now);
888 	}
889 	for (;;) {
890 		x = sctp_select_initial_TSN(&inp->sctp_ep);
891 		if (x == 0) {
892 			/* we never use 0 */
893 			continue;
894 		}
895 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
896 			break;
897 		}
898 	}
899 	return (x);
900 }
901 
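/*
 * Translate the kernel's internal association state bits into the
 * user-visible SCTP_* association states (as reported through, for
 * example, the SCTP_STATUS socket option).
 */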
902 int32_t
903 sctp_map_assoc_state(int kernel_state)
904 {
905 	int32_t user_state;
906 
907 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
908 		user_state = SCTP_CLOSED;
909 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
910 		user_state = SCTP_SHUTDOWN_PENDING;
911 	} else {
912 		switch (kernel_state & SCTP_STATE_MASK) {
913 		case SCTP_STATE_EMPTY:
914 			user_state = SCTP_CLOSED;
915 			break;
916 		case SCTP_STATE_INUSE:
917 			user_state = SCTP_CLOSED;
918 			break;
919 		case SCTP_STATE_COOKIE_WAIT:
920 			user_state = SCTP_COOKIE_WAIT;
921 			break;
922 		case SCTP_STATE_COOKIE_ECHOED:
923 			user_state = SCTP_COOKIE_ECHOED;
924 			break;
925 		case SCTP_STATE_OPEN:
926 			user_state = SCTP_ESTABLISHED;
927 			break;
928 		case SCTP_STATE_SHUTDOWN_SENT:
929 			user_state = SCTP_SHUTDOWN_SENT;
930 			break;
931 		case SCTP_STATE_SHUTDOWN_RECEIVED:
932 			user_state = SCTP_SHUTDOWN_RECEIVED;
933 			break;
934 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
935 			user_state = SCTP_SHUTDOWN_ACK_SENT;
936 			break;
937 		default:
938 			user_state = SCTP_CLOSED;
939 			break;
940 		}
941 	}
942 	return (user_state);
943 }
944 
945 int
946 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
947     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
948 {
949 	struct sctp_association *asoc;
950 
951 	/*
952 	 * Anything set to zero is taken care of by the allocation routine's
953 	 * bzero
954 	 */
955 
956 	/*
957 	 * Up front, select what scoping to apply to the addresses I tell my
958 	 * peer. Not sure what to do with these right now; we will need to come up
959 	 * with a way to set them. We may need to pass them through from the
960 	 * caller in the sctp_aloc_assoc() function.
961 	 */
962 	int i;
963 #if defined(SCTP_DETAILED_STR_STATS)
964 	int j;
965 #endif
966 
967 	asoc = &stcb->asoc;
968 	/* init all variables to a known value. */
969 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
970 	asoc->max_burst = inp->sctp_ep.max_burst;
971 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
972 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
973 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
974 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
975 	asoc->ecn_supported = inp->ecn_supported;
976 	asoc->prsctp_supported = inp->prsctp_supported;
977 	asoc->idata_supported = inp->idata_supported;
978 	asoc->auth_supported = inp->auth_supported;
979 	asoc->asconf_supported = inp->asconf_supported;
980 	asoc->reconfig_supported = inp->reconfig_supported;
981 	asoc->nrsack_supported = inp->nrsack_supported;
982 	asoc->pktdrop_supported = inp->pktdrop_supported;
983 	asoc->idata_supported = inp->idata_supported;
984 	asoc->sctp_cmt_pf = (uint8_t)0;
985 	asoc->sctp_frag_point = inp->sctp_frag_point;
986 	asoc->sctp_features = inp->sctp_features;
987 	asoc->default_dscp = inp->sctp_ep.default_dscp;
988 	asoc->max_cwnd = inp->max_cwnd;
989 #ifdef INET6
990 	if (inp->sctp_ep.default_flowlabel) {
991 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
992 	} else {
993 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
994 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
995 			asoc->default_flowlabel &= 0x000fffff;
996 			asoc->default_flowlabel |= 0x80000000;
997 		} else {
998 			asoc->default_flowlabel = 0;
999 		}
1000 	}
1001 #endif
1002 	asoc->sb_send_resv = 0;
1003 	if (override_tag) {
1004 		asoc->my_vtag = override_tag;
1005 	} else {
1006 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1007 	}
1008 	/* Get the nonce tags */
1009 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1010 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1011 	asoc->vrf_id = vrf_id;
1012 
1013 #ifdef SCTP_ASOCLOG_OF_TSNS
1014 	asoc->tsn_in_at = 0;
1015 	asoc->tsn_out_at = 0;
1016 	asoc->tsn_in_wrapped = 0;
1017 	asoc->tsn_out_wrapped = 0;
1018 	asoc->cumack_log_at = 0;
1019 	asoc->cumack_log_atsnt = 0;
1020 #endif
1021 #ifdef SCTP_FS_SPEC_LOG
1022 	asoc->fs_index = 0;
1023 #endif
1024 	asoc->refcnt = 0;
1025 	asoc->assoc_up_sent = 0;
1026 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1027 	    sctp_select_initial_TSN(&inp->sctp_ep);
1028 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1029 	/* we are optimistic here */
1030 	asoc->peer_supports_nat = 0;
1031 	asoc->sent_queue_retran_cnt = 0;
1032 
1033 	/* for CMT */
1034 	asoc->last_net_cmt_send_started = NULL;
1035 
1036 	/* This will need to be adjusted */
1037 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1038 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1039 	asoc->asconf_seq_in = asoc->last_acked_seq;
1040 
1041 	/* here we are different, we hold the next one we expect */
1042 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1043 
1044 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1045 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1046 
1047 	asoc->default_mtu = inp->sctp_ep.default_mtu;
1048 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1049 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1050 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1051 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1052 	asoc->free_chunk_cnt = 0;
1053 
1054 	asoc->iam_blocking = 0;
1055 	asoc->context = inp->sctp_context;
1056 	asoc->local_strreset_support = inp->local_strreset_support;
1057 	asoc->def_send = inp->def_send;
1058 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1059 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1060 	asoc->pr_sctp_cnt = 0;
1061 	asoc->total_output_queue_size = 0;
1062 
1063 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1064 		asoc->scope.ipv6_addr_legal = 1;
1065 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1066 			asoc->scope.ipv4_addr_legal = 1;
1067 		} else {
1068 			asoc->scope.ipv4_addr_legal = 0;
1069 		}
1070 	} else {
1071 		asoc->scope.ipv6_addr_legal = 0;
1072 		asoc->scope.ipv4_addr_legal = 1;
1073 	}
1074 
1075 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1076 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1077 
1078 	asoc->smallest_mtu = inp->sctp_frag_point;
1079 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1080 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1081 
1082 	asoc->stream_locked_on = 0;
1083 	asoc->ecn_echo_cnt_onq = 0;
1084 	asoc->stream_locked = 0;
1085 
1086 	asoc->send_sack = 1;
1087 
1088 	LIST_INIT(&asoc->sctp_restricted_addrs);
1089 
1090 	TAILQ_INIT(&asoc->nets);
1091 	TAILQ_INIT(&asoc->pending_reply_queue);
1092 	TAILQ_INIT(&asoc->asconf_ack_sent);
1093 	/* Setup to fill the hb random cache at first HB */
1094 	asoc->hb_random_idx = 4;
1095 
1096 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1097 
1098 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1099 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1100 
1101 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1102 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1103 
1104 	/*
1105 	 * Now the stream parameters; here we allocate space for all streams
1106 	 * that we request by default.
1107 	 */
1108 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1109 	    o_strms;
1110 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1111 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1112 	    SCTP_M_STRMO);
1113 	if (asoc->strmout == NULL) {
1114 		/* big trouble no memory */
1115 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1116 		return (ENOMEM);
1117 	}
1118 	for (i = 0; i < asoc->streamoutcnt; i++) {
1119 		/*
1120 		 * The inbound side must be set to 0xffff. Also NOTE that when
1121 		 * we get the INIT-ACK back (for the INIT sender) we MUST
1122 		 * reduce the count (streamoutcnt), but first check whether we
1123 		 * sent on any of the upper streams that were dropped (if some
1124 		 * were). Those that were dropped must be reported to the
1125 		 * upper layer as failed to send.
1126 		 */
1127 		asoc->strmout[i].next_mid_ordered = 0;
1128 		asoc->strmout[i].next_mid_unordered = 0;
1129 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1130 		asoc->strmout[i].chunks_on_queues = 0;
1131 #if defined(SCTP_DETAILED_STR_STATS)
1132 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1133 			asoc->strmout[i].abandoned_sent[j] = 0;
1134 			asoc->strmout[i].abandoned_unsent[j] = 0;
1135 		}
1136 #else
1137 		asoc->strmout[i].abandoned_sent[0] = 0;
1138 		asoc->strmout[i].abandoned_unsent[0] = 0;
1139 #endif
1140 		asoc->strmout[i].sid = i;
1141 		asoc->strmout[i].last_msg_incomplete = 0;
1142 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1143 		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1144 	}
1145 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1146 
1147 	/* Now the mapping array */
1148 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1149 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1150 	    SCTP_M_MAP);
1151 	if (asoc->mapping_array == NULL) {
1152 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1153 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1154 		return (ENOMEM);
1155 	}
1156 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1157 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1158 	    SCTP_M_MAP);
1159 	if (asoc->nr_mapping_array == NULL) {
1160 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1161 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1162 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1163 		return (ENOMEM);
1164 	}
1165 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1166 
1167 	/* Now the init of the other outqueues */
1168 	TAILQ_INIT(&asoc->free_chunks);
1169 	TAILQ_INIT(&asoc->control_send_queue);
1170 	TAILQ_INIT(&asoc->asconf_send_queue);
1171 	TAILQ_INIT(&asoc->send_queue);
1172 	TAILQ_INIT(&asoc->sent_queue);
1173 	TAILQ_INIT(&asoc->resetHead);
1174 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1175 	TAILQ_INIT(&asoc->asconf_queue);
1176 	/* authentication fields */
1177 	asoc->authinfo.random = NULL;
1178 	asoc->authinfo.active_keyid = 0;
1179 	asoc->authinfo.assoc_key = NULL;
1180 	asoc->authinfo.assoc_keyid = 0;
1181 	asoc->authinfo.recv_key = NULL;
1182 	asoc->authinfo.recv_keyid = 0;
1183 	LIST_INIT(&asoc->shared_keys);
1184 	asoc->marked_retrans = 0;
1185 	asoc->port = inp->sctp_ep.port;
1186 	asoc->timoinit = 0;
1187 	asoc->timodata = 0;
1188 	asoc->timosack = 0;
1189 	asoc->timoshutdown = 0;
1190 	asoc->timoheartbeat = 0;
1191 	asoc->timocookie = 0;
1192 	asoc->timoshutdownack = 0;
1193 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1194 	asoc->discontinuity_time = asoc->start_time;
1195 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1196 		asoc->abandoned_unsent[i] = 0;
1197 		asoc->abandoned_sent[i] = 0;
1198 	}
1199 	/*
1200 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1201 	 * freed later when the association is freed.
1202 	 */
1203 	return (0);
1204 }
1205 
1206 void
1207 sctp_print_mapping_array(struct sctp_association *asoc)
1208 {
1209 	unsigned int i, limit;
1210 
1211 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1212 	    asoc->mapping_array_size,
1213 	    asoc->mapping_array_base_tsn,
1214 	    asoc->cumulative_tsn,
1215 	    asoc->highest_tsn_inside_map,
1216 	    asoc->highest_tsn_inside_nr_map);
1217 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1218 		if (asoc->mapping_array[limit - 1] != 0) {
1219 			break;
1220 		}
1221 	}
1222 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1223 	for (i = 0; i < limit; i++) {
1224 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1225 	}
1226 	if (limit % 16)
1227 		SCTP_PRINTF("\n");
1228 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1229 		if (asoc->nr_mapping_array[limit - 1]) {
1230 			break;
1231 		}
1232 	}
1233 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1234 	for (i = 0; i < limit; i++) {
1235 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1236 	}
1237 	if (limit % 16)
1238 		SCTP_PRINTF("\n");
1239 }
1240 
1241 int
1242 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1243 {
1244 	/* mapping array needs to grow */
1245 	uint8_t *new_array1, *new_array2;
1246 	uint32_t new_size;
1247 
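	/*
	 * Grow both maps in lockstep so they always cover the same TSN
	 * range; e.g. needing 100 more TSNs adds ceil(100 / 8) = 13 bytes
	 * plus SCTP_MAPPING_ARRAY_INCR to each array.
	 */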
1248 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1249 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1250 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1251 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1252 		/* can't get more, forget it */
1253 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1254 		if (new_array1) {
1255 			SCTP_FREE(new_array1, SCTP_M_MAP);
1256 		}
1257 		if (new_array2) {
1258 			SCTP_FREE(new_array2, SCTP_M_MAP);
1259 		}
1260 		return (-1);
1261 	}
1262 	memset(new_array1, 0, new_size);
1263 	memset(new_array2, 0, new_size);
1264 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1265 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1266 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1267 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1268 	asoc->mapping_array = new_array1;
1269 	asoc->nr_mapping_array = new_array2;
1270 	asoc->mapping_array_size = new_size;
1271 	return (0);
1272 }
1273 
1274 
1275 static void
1276 sctp_iterator_work(struct sctp_iterator *it)
1277 {
1278 	int iteration_count = 0;
1279 	int inp_skip = 0;
1280 	int first_in = 1;
1281 	struct sctp_inpcb *tinp;
1282 
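	/*
	 * Walk every endpoint (or just one, for SCTP_ITERATOR_DO_SINGLE_INP)
	 * and every association matching the requested PCB flags, features
	 * and association state, invoking the caller-supplied callbacks.
	 * The INP info and iterator locks are held across the walk and are
	 * periodically dropped so that other threads can make progress.
	 */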
1283 	SCTP_INP_INFO_RLOCK();
1284 	SCTP_ITERATOR_LOCK();
1285 	sctp_it_ctl.cur_it = it;
1286 	if (it->inp) {
1287 		SCTP_INP_RLOCK(it->inp);
1288 		SCTP_INP_DECR_REF(it->inp);
1289 	}
1290 	if (it->inp == NULL) {
1291 		/* iterator is complete */
1292 done_with_iterator:
1293 		sctp_it_ctl.cur_it = NULL;
1294 		SCTP_ITERATOR_UNLOCK();
1295 		SCTP_INP_INFO_RUNLOCK();
1296 		if (it->function_atend != NULL) {
1297 			(*it->function_atend) (it->pointer, it->val);
1298 		}
1299 		SCTP_FREE(it, SCTP_M_ITER);
1300 		return;
1301 	}
1302 select_a_new_ep:
1303 	if (first_in) {
1304 		first_in = 0;
1305 	} else {
1306 		SCTP_INP_RLOCK(it->inp);
1307 	}
1308 	while (((it->pcb_flags) &&
1309 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1310 	    ((it->pcb_features) &&
1311 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1312 		/* endpoint flags or features don't match, so keep looking */
1313 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1314 			SCTP_INP_RUNLOCK(it->inp);
1315 			goto done_with_iterator;
1316 		}
1317 		tinp = it->inp;
1318 		it->inp = LIST_NEXT(it->inp, sctp_list);
1319 		SCTP_INP_RUNLOCK(tinp);
1320 		if (it->inp == NULL) {
1321 			goto done_with_iterator;
1322 		}
1323 		SCTP_INP_RLOCK(it->inp);
1324 	}
1325 	/* now go through each assoc which is in the desired state */
1326 	if (it->done_current_ep == 0) {
1327 		if (it->function_inp != NULL)
1328 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1329 		it->done_current_ep = 1;
1330 	}
1331 	if (it->stcb == NULL) {
1332 		/* start with the first association on this endpoint */
1333 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1334 	}
1335 	if ((inp_skip) || it->stcb == NULL) {
1336 		if (it->function_inp_end != NULL) {
1337 			inp_skip = (*it->function_inp_end) (it->inp,
1338 			    it->pointer,
1339 			    it->val);
1340 		}
1341 		SCTP_INP_RUNLOCK(it->inp);
1342 		goto no_stcb;
1343 	}
1344 	while (it->stcb) {
1345 		SCTP_TCB_LOCK(it->stcb);
1346 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1347 			/* not in the right state... keep looking */
1348 			SCTP_TCB_UNLOCK(it->stcb);
1349 			goto next_assoc;
1350 		}
1351 		/* see if we have hit the per-pass limit of the iterator loop */
1352 		iteration_count++;
1353 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1354 			/* Pause to let others grab the lock */
1355 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1356 			SCTP_TCB_UNLOCK(it->stcb);
1357 			SCTP_INP_INCR_REF(it->inp);
1358 			SCTP_INP_RUNLOCK(it->inp);
1359 			SCTP_ITERATOR_UNLOCK();
1360 			SCTP_INP_INFO_RUNLOCK();
1361 			SCTP_INP_INFO_RLOCK();
1362 			SCTP_ITERATOR_LOCK();
1363 			if (sctp_it_ctl.iterator_flags) {
1364 				/* We won't be staying here */
1365 				SCTP_INP_DECR_REF(it->inp);
1366 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1367 				if (sctp_it_ctl.iterator_flags &
1368 				    SCTP_ITERATOR_STOP_CUR_IT) {
1369 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1370 					goto done_with_iterator;
1371 				}
1372 				if (sctp_it_ctl.iterator_flags &
1373 				    SCTP_ITERATOR_STOP_CUR_INP) {
1374 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1375 					goto no_stcb;
1376 				}
1377 				/* If we reach here huh? */
1378 				SCTP_PRINTF("Unknown it ctl flag %x\n",
1379 				    sctp_it_ctl.iterator_flags);
1380 				sctp_it_ctl.iterator_flags = 0;
1381 			}
1382 			SCTP_INP_RLOCK(it->inp);
1383 			SCTP_INP_DECR_REF(it->inp);
1384 			SCTP_TCB_LOCK(it->stcb);
1385 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1386 			iteration_count = 0;
1387 		}
1388 		/* run function on this one */
1389 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1390 
1391 		/*
1392 		 * we lie here, it really needs to have its own type but
1393 		 * first I must verify that this won't affect things :-0
1394 		 */
1395 		if (it->no_chunk_output == 0)
1396 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1397 
1398 		SCTP_TCB_UNLOCK(it->stcb);
1399 next_assoc:
1400 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1401 		if (it->stcb == NULL) {
1402 			/* Run last function */
1403 			if (it->function_inp_end != NULL) {
1404 				inp_skip = (*it->function_inp_end) (it->inp,
1405 				    it->pointer,
1406 				    it->val);
1407 			}
1408 		}
1409 	}
1410 	SCTP_INP_RUNLOCK(it->inp);
1411 no_stcb:
1412 	/* done with all assocs on this endpoint, move on to next endpoint */
1413 	it->done_current_ep = 0;
1414 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1415 		it->inp = NULL;
1416 	} else {
1417 		it->inp = LIST_NEXT(it->inp, sctp_list);
1418 	}
1419 	if (it->inp == NULL) {
1420 		goto done_with_iterator;
1421 	}
1422 	goto select_a_new_ep;
1423 }
1424 
1425 void
1426 sctp_iterator_worker(void)
1427 {
1428 	struct sctp_iterator *it, *nit;
1429 
1430 	/* This function is called with the WQ lock in place */
1431 
1432 	sctp_it_ctl.iterator_running = 1;
1433 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1434 		/* now let's work on this one */
1435 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1436 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1437 		CURVNET_SET(it->vn);
1438 		sctp_iterator_work(it);
1439 		CURVNET_RESTORE();
1440 		SCTP_IPI_ITERATOR_WQ_LOCK();
1441 		/* sa_ignore FREED_MEMORY */
1442 	}
1443 	sctp_it_ctl.iterator_running = 0;
1444 	return;
1445 }
1446 
1447 
1448 static void
1449 sctp_handle_addr_wq(void)
1450 {
1451 	/* deal with the ADDR wq from the rtsock calls */
1452 	struct sctp_laddr *wi, *nwi;
1453 	struct sctp_asconf_iterator *asc;
1454 
1455 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1456 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1457 	if (asc == NULL) {
1458 		/* Try later, no memory */
1459 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1460 		    (struct sctp_inpcb *)NULL,
1461 		    (struct sctp_tcb *)NULL,
1462 		    (struct sctp_nets *)NULL);
1463 		return;
1464 	}
1465 	LIST_INIT(&asc->list_of_work);
1466 	asc->cnt = 0;
1467 
1468 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1469 		LIST_REMOVE(wi, sctp_nxt_addr);
1470 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1471 		asc->cnt++;
1472 	}
1473 
1474 	if (asc->cnt == 0) {
1475 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1476 	} else {
1477 		int ret;
1478 
1479 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1480 		    sctp_asconf_iterator_stcb,
1481 		    NULL,	/* No ep end for boundall */
1482 		    SCTP_PCB_FLAGS_BOUNDALL,
1483 		    SCTP_PCB_ANY_FEATURES,
1484 		    SCTP_ASOC_ANY_STATE,
1485 		    (void *)asc, 0,
1486 		    sctp_asconf_iterator_end, NULL, 0);
1487 		if (ret) {
1488 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1489 			/*
1490 			 * Free the work if we are stopping; otherwise put it
1491 			 * back on the addr_wq.
1492 			 */
1493 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1494 				sctp_asconf_iterator_end(asc, 0);
1495 			} else {
1496 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1497 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1498 				}
1499 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1500 			}
1501 		}
1502 	}
1503 }
1504 
1505 void
1506 sctp_timeout_handler(void *t)
1507 {
1508 	struct sctp_inpcb *inp;
1509 	struct sctp_tcb *stcb;
1510 	struct sctp_nets *net;
1511 	struct sctp_timer *tmr;
1512 	struct mbuf *op_err;
1513 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1514 	struct socket *so;
1515 #endif
1516 	int did_output;
1517 	int type;
1518 
1519 	tmr = (struct sctp_timer *)t;
1520 	inp = (struct sctp_inpcb *)tmr->ep;
1521 	stcb = (struct sctp_tcb *)tmr->tcb;
1522 	net = (struct sctp_nets *)tmr->net;
1523 	CURVNET_SET((struct vnet *)tmr->vnet);
1524 	did_output = 1;
1525 
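	/*
	 * Before dispatching, pin the endpoint and association with
	 * reference counts and re-validate that the callout is still
	 * active and the association has not been freed; stale or
	 * rescheduled timers are simply ignored.
	 */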
1526 #ifdef SCTP_AUDITING_ENABLED
1527 	sctp_audit_log(0xF0, (uint8_t)tmr->type);
1528 	sctp_auditing(3, inp, stcb, net);
1529 #endif
1530 
1531 	/* sanity checks... */
1532 	if (tmr->self != (void *)tmr) {
1533 		/*
1534 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1535 		 * (void *)tmr);
1536 		 */
1537 		CURVNET_RESTORE();
1538 		return;
1539 	}
1540 	tmr->stopped_from = 0xa001;
1541 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1542 		/*
1543 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1544 		 * tmr->type);
1545 		 */
1546 		CURVNET_RESTORE();
1547 		return;
1548 	}
1549 	tmr->stopped_from = 0xa002;
1550 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1551 		CURVNET_RESTORE();
1552 		return;
1553 	}
1554 	/* if this is an iterator timeout, get the struct and clear inp */
1555 	tmr->stopped_from = 0xa003;
1556 	if (inp) {
1557 		SCTP_INP_INCR_REF(inp);
1558 		if ((inp->sctp_socket == NULL) &&
1559 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1560 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1561 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1562 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1563 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1564 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1565 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1566 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1567 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))) {
1568 			SCTP_INP_DECR_REF(inp);
1569 			CURVNET_RESTORE();
1570 			return;
1571 		}
1572 	}
1573 	tmr->stopped_from = 0xa004;
1574 	if (stcb) {
1575 		atomic_add_int(&stcb->asoc.refcnt, 1);
1576 		if (stcb->asoc.state == 0) {
1577 			atomic_add_int(&stcb->asoc.refcnt, -1);
1578 			if (inp) {
1579 				SCTP_INP_DECR_REF(inp);
1580 			}
1581 			CURVNET_RESTORE();
1582 			return;
1583 		}
1584 	}
1585 	type = tmr->type;
1586 	tmr->stopped_from = 0xa005;
1587 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
1588 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1589 		if (inp) {
1590 			SCTP_INP_DECR_REF(inp);
1591 		}
1592 		if (stcb) {
1593 			atomic_add_int(&stcb->asoc.refcnt, -1);
1594 		}
1595 		CURVNET_RESTORE();
1596 		return;
1597 	}
1598 	tmr->stopped_from = 0xa006;
1599 
1600 	if (stcb) {
1601 		SCTP_TCB_LOCK(stcb);
1602 		atomic_add_int(&stcb->asoc.refcnt, -1);
1603 		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1604 		    ((stcb->asoc.state == 0) ||
1605 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1606 			SCTP_TCB_UNLOCK(stcb);
1607 			if (inp) {
1608 				SCTP_INP_DECR_REF(inp);
1609 			}
1610 			CURVNET_RESTORE();
1611 			return;
1612 		}
1613 	} else if (inp != NULL) {
1614 		if (type != SCTP_TIMER_TYPE_INPKILL) {
1615 			SCTP_INP_WLOCK(inp);
1616 		}
1617 	} else {
1618 		SCTP_WQ_ADDR_LOCK();
1619 	}
1620 	/* record in stopped_from which timeout occurred */
1621 	tmr->stopped_from = type;
1622 
1623 	/* mark as being serviced now */
1624 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1625 		/*
1626 		 * Callout has been rescheduled.
1627 		 */
1628 		goto get_out;
1629 	}
1630 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1631 		/*
1632 		 * Not active, so no action.
1633 		 */
1634 		goto get_out;
1635 	}
1636 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1637 
1638 	/* call the handler for the appropriate timer type */
1639 	switch (type) {
1640 	case SCTP_TIMER_TYPE_ADDR_WQ:
1641 		sctp_handle_addr_wq();
1642 		break;
1643 	case SCTP_TIMER_TYPE_SEND:
1644 		if ((stcb == NULL) || (inp == NULL)) {
1645 			break;
1646 		}
1647 		SCTP_STAT_INCR(sctps_timodata);
1648 		stcb->asoc.timodata++;
1649 		stcb->asoc.num_send_timers_up--;
1650 		if (stcb->asoc.num_send_timers_up < 0) {
1651 			stcb->asoc.num_send_timers_up = 0;
1652 		}
1653 		SCTP_TCB_LOCK_ASSERT(stcb);
1654 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1655 			/* no need to unlock the tcb, it's gone */
1656 
1657 			goto out_decr;
1658 		}
1659 		SCTP_TCB_LOCK_ASSERT(stcb);
1660 #ifdef SCTP_AUDITING_ENABLED
1661 		sctp_auditing(4, inp, stcb, net);
1662 #endif
1663 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1664 		if ((stcb->asoc.num_send_timers_up == 0) &&
1665 		    (stcb->asoc.sent_queue_cnt > 0)) {
1666 			struct sctp_tmit_chunk *chk;
1667 
1668 			/*
1669 			 * Safeguard: if there are chunks on the sent queue
1670 			 * but no timers running, something is wrong, so we
1671 			 * start a timer on the first chunk on the sent
1672 			 * queue, on whatever net it is sent to.
1673 			 */
1674 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1675 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1676 			    chk->whoTo);
1677 		}
1678 		break;
1679 	case SCTP_TIMER_TYPE_INIT:
1680 		if ((stcb == NULL) || (inp == NULL)) {
1681 			break;
1682 		}
1683 		SCTP_STAT_INCR(sctps_timoinit);
1684 		stcb->asoc.timoinit++;
1685 		if (sctp_t1init_timer(inp, stcb, net)) {
1686 			/* no need to unlock the tcb, it's gone */
1687 			goto out_decr;
1688 		}
1689 		/* We do output but not here */
1690 		did_output = 0;
1691 		break;
1692 	case SCTP_TIMER_TYPE_RECV:
1693 		if ((stcb == NULL) || (inp == NULL)) {
1694 			break;
1695 		}
1696 		SCTP_STAT_INCR(sctps_timosack);
1697 		stcb->asoc.timosack++;
1698 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1699 #ifdef SCTP_AUDITING_ENABLED
1700 		sctp_auditing(4, inp, stcb, net);
1701 #endif
1702 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1703 		break;
1704 	case SCTP_TIMER_TYPE_SHUTDOWN:
1705 		if ((stcb == NULL) || (inp == NULL)) {
1706 			break;
1707 		}
1708 		if (sctp_shutdown_timer(inp, stcb, net)) {
1709 			/* no need to unlock the tcb, it's gone */
1710 			goto out_decr;
1711 		}
1712 		SCTP_STAT_INCR(sctps_timoshutdown);
1713 		stcb->asoc.timoshutdown++;
1714 #ifdef SCTP_AUDITING_ENABLED
1715 		sctp_auditing(4, inp, stcb, net);
1716 #endif
1717 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1718 		break;
1719 	case SCTP_TIMER_TYPE_HEARTBEAT:
1720 		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1721 			break;
1722 		}
1723 		SCTP_STAT_INCR(sctps_timoheartbeat);
1724 		stcb->asoc.timoheartbeat++;
1725 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1726 			/* no need to unlock the tcb, it's gone */
1727 			goto out_decr;
1728 		}
1729 #ifdef SCTP_AUDITING_ENABLED
1730 		sctp_auditing(4, inp, stcb, net);
1731 #endif
1732 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1733 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1734 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1735 		}
1736 		break;
1737 	case SCTP_TIMER_TYPE_COOKIE:
1738 		if ((stcb == NULL) || (inp == NULL)) {
1739 			break;
1740 		}
1741 		if (sctp_cookie_timer(inp, stcb, net)) {
1742 			/* no need to unlock on tcb, it's gone */
1743 			goto out_decr;
1744 		}
1745 		SCTP_STAT_INCR(sctps_timocookie);
1746 		stcb->asoc.timocookie++;
1747 #ifdef SCTP_AUDITING_ENABLED
1748 		sctp_auditing(4, inp, stcb, net);
1749 #endif
1750 		/*
1751 		 * We treat the T3 and cookie timers pretty much the same with
1752 		 * respect to the 'from' value passed to chunk_output.
1753 		 */
1754 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1755 		break;
1756 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1757 		{
1758 			struct timeval tv;
1759 			int i, secret;
1760 
1761 			if (inp == NULL) {
1762 				break;
1763 			}
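			/*
			 * Rotate the endpoint's cookie secret: remember which
			 * secret was current so cookies signed with it can
			 * still be validated, then fill the new slot with
			 * fresh random key material and re-arm the timer.
			 */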
1764 			SCTP_STAT_INCR(sctps_timosecret);
1765 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1766 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1767 			inp->sctp_ep.last_secret_number =
1768 			    inp->sctp_ep.current_secret_number;
1769 			inp->sctp_ep.current_secret_number++;
1770 			if (inp->sctp_ep.current_secret_number >=
1771 			    SCTP_HOW_MANY_SECRETS) {
1772 				inp->sctp_ep.current_secret_number = 0;
1773 			}
1774 			secret = (int)inp->sctp_ep.current_secret_number;
1775 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1776 				inp->sctp_ep.secret_key[secret][i] =
1777 				    sctp_select_initial_TSN(&inp->sctp_ep);
1778 			}
1779 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1780 		}
1781 		did_output = 0;
1782 		break;
1783 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1784 		if ((stcb == NULL) || (inp == NULL)) {
1785 			break;
1786 		}
1787 		SCTP_STAT_INCR(sctps_timopathmtu);
1788 		sctp_pathmtu_timer(inp, stcb, net);
1789 		did_output = 0;
1790 		break;
1791 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1792 		if ((stcb == NULL) || (inp == NULL)) {
1793 			break;
1794 		}
1795 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1796 			/* no need to unlock on tcb, it's gone */
1797 			goto out_decr;
1798 		}
1799 		SCTP_STAT_INCR(sctps_timoshutdownack);
1800 		stcb->asoc.timoshutdownack++;
1801 #ifdef SCTP_AUDITING_ENABLED
1802 		sctp_auditing(4, inp, stcb, net);
1803 #endif
1804 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1805 		break;
1806 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1807 		if ((stcb == NULL) || (inp == NULL)) {
1808 			break;
1809 		}
1810 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1811 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1812 		    "Shutdown guard timer expired");
1813 		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
1814 		/* no need to unlock on tcb, it's gone */
1815 		goto out_decr;
1816 
1817 	case SCTP_TIMER_TYPE_STRRESET:
1818 		if ((stcb == NULL) || (inp == NULL)) {
1819 			break;
1820 		}
1821 		if (sctp_strreset_timer(inp, stcb, net)) {
1822 			/* no need to unlock on tcb, it's gone */
1823 			goto out_decr;
1824 		}
1825 		SCTP_STAT_INCR(sctps_timostrmrst);
1826 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1827 		break;
1828 	case SCTP_TIMER_TYPE_ASCONF:
1829 		if ((stcb == NULL) || (inp == NULL)) {
1830 			break;
1831 		}
1832 		if (sctp_asconf_timer(inp, stcb, net)) {
1833 			/* no need to unlock on tcb, it's gone */
1834 			goto out_decr;
1835 		}
1836 		SCTP_STAT_INCR(sctps_timoasconf);
1837 #ifdef SCTP_AUDITING_ENABLED
1838 		sctp_auditing(4, inp, stcb, net);
1839 #endif
1840 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1841 		break;
1842 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1843 		if ((stcb == NULL) || (inp == NULL)) {
1844 			break;
1845 		}
1846 		sctp_delete_prim_timer(inp, stcb, net);
1847 		SCTP_STAT_INCR(sctps_timodelprim);
1848 		break;
1849 
1850 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1851 		if ((stcb == NULL) || (inp == NULL)) {
1852 			break;
1853 		}
1854 		SCTP_STAT_INCR(sctps_timoautoclose);
1855 		sctp_autoclose_timer(inp, stcb, net);
1856 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1857 		did_output = 0;
1858 		break;
1859 	case SCTP_TIMER_TYPE_ASOCKILL:
1860 		if ((stcb == NULL) || (inp == NULL)) {
1861 			break;
1862 		}
1863 		SCTP_STAT_INCR(sctps_timoassockill);
1864 		/* Can we free it yet? */
1865 		SCTP_INP_DECR_REF(inp);
1866 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
1867 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1868 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1869 		so = SCTP_INP_SO(inp);
1870 		atomic_add_int(&stcb->asoc.refcnt, 1);
1871 		SCTP_TCB_UNLOCK(stcb);
1872 		SCTP_SOCKET_LOCK(so, 1);
1873 		SCTP_TCB_LOCK(stcb);
1874 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1875 #endif
1876 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1877 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1878 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1879 		SCTP_SOCKET_UNLOCK(so, 1);
1880 #endif
1881 		/*
1882 		 * free asoc always unlocks (or destroys) the TCB, so prevent a
1883 		 * duplicate unlock or an unlock of a freed mtx :-0
1884 		 */
1885 		stcb = NULL;
1886 		goto out_no_decr;
1887 	case SCTP_TIMER_TYPE_INPKILL:
1888 		SCTP_STAT_INCR(sctps_timoinpkill);
1889 		if (inp == NULL) {
1890 			break;
1891 		}
1892 		/*
1893 		 * special case, take away our increment since WE are the
1894 		 * killer
1895 		 */
1896 		SCTP_INP_DECR_REF(inp);
1897 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
1898 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1899 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1900 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1901 		inp = NULL;
1902 		goto out_no_decr;
1903 	default:
1904 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1905 		    type);
1906 		break;
1907 	}
1908 #ifdef SCTP_AUDITING_ENABLED
1909 	sctp_audit_log(0xF1, (uint8_t)type);
1910 	if (inp)
1911 		sctp_auditing(5, inp, stcb, net);
1912 #endif
1913 	if ((did_output) && stcb) {
1914 		/*
1915 		 * Now we need to clean up the control chunk chain if an
1916 		 * ECNE is on it. It must be marked as UNSENT again so the
1917 		 * next call will continue to send it until we get a CWR to
1918 		 * remove it. It is, however, unlikely that we will find an
1919 		 * ECN echo on the chain.
1920 		 */
1921 		sctp_fix_ecn_echo(&stcb->asoc);
1922 	}
1923 get_out:
1924 	if (stcb) {
1925 		SCTP_TCB_UNLOCK(stcb);
1926 	} else if (inp != NULL) {
1927 		SCTP_INP_WUNLOCK(inp);
1928 	} else {
1929 		SCTP_WQ_ADDR_UNLOCK();
1930 	}
1931 
1932 out_decr:
1933 	if (inp) {
1934 		SCTP_INP_DECR_REF(inp);
1935 	}
1936 out_no_decr:
1937 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
1938 	CURVNET_RESTORE();
1939 }
1940 
1941 void
1942 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1943     struct sctp_nets *net)
1944 {
1945 	uint32_t to_ticks;
1946 	struct sctp_timer *tmr;
1947 
1948 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1949 		return;
1950 
1951 	tmr = NULL;
1952 	if (stcb) {
1953 		SCTP_TCB_LOCK_ASSERT(stcb);
1954 	}
1955 	switch (t_type) {
1956 	case SCTP_TIMER_TYPE_ADDR_WQ:
1957 		/* Only 1 tick away :-) */
1958 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1959 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1960 		break;
1961 	case SCTP_TIMER_TYPE_SEND:
1962 		/* Here we use the RTO timer */
1963 		{
1964 			int rto_val;
1965 
1966 			if ((stcb == NULL) || (net == NULL)) {
1967 				return;
1968 			}
1969 			tmr = &net->rxt_timer;
1970 			if (net->RTO == 0) {
1971 				rto_val = stcb->asoc.initial_rto;
1972 			} else {
1973 				rto_val = net->RTO;
1974 			}
1975 			to_ticks = MSEC_TO_TICKS(rto_val);
1976 		}
1977 		break;
1978 	case SCTP_TIMER_TYPE_INIT:
1979 		/*
1980 		 * Here we use the INIT timer default, usually about 1
1981 		 * minute.
1982 		 */
1983 		if ((stcb == NULL) || (net == NULL)) {
1984 			return;
1985 		}
1986 		tmr = &net->rxt_timer;
1987 		if (net->RTO == 0) {
1988 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1989 		} else {
1990 			to_ticks = MSEC_TO_TICKS(net->RTO);
1991 		}
1992 		break;
1993 	case SCTP_TIMER_TYPE_RECV:
1994 		/*
1995 		 * Here we use the delayed-ack timer value from the inp,
1996 		 * usually about 200ms.
1997 		 */
1998 		if (stcb == NULL) {
1999 			return;
2000 		}
2001 		tmr = &stcb->asoc.dack_timer;
2002 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2003 		break;
2004 	case SCTP_TIMER_TYPE_SHUTDOWN:
2005 		/* Here we use the RTO of the destination. */
2006 		if ((stcb == NULL) || (net == NULL)) {
2007 			return;
2008 		}
2009 		if (net->RTO == 0) {
2010 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2011 		} else {
2012 			to_ticks = MSEC_TO_TICKS(net->RTO);
2013 		}
2014 		tmr = &net->rxt_timer;
2015 		break;
2016 	case SCTP_TIMER_TYPE_HEARTBEAT:
2017 		/*
2018 		 * The net is used here so that we can add in the RTO even
2019 		 * though we use a different timer. We also add the HB delay
2020 		 * PLUS a random jitter.
2021 		 */
2022 		if ((stcb == NULL) || (net == NULL)) {
2023 			return;
2024 		} else {
2025 			uint32_t rndval;
2026 			uint32_t jitter;
2027 
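			/*
			 * Heartbeats keep going to unconfirmed addresses even
			 * if SCTP_ADDR_NOHB is set, since heartbeats are what
			 * confirm an address; only confirmed addresses with
			 * heartbeats disabled are skipped.
			 */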
2028 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2029 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2030 				return;
2031 			}
2032 			if (net->RTO == 0) {
2033 				to_ticks = stcb->asoc.initial_rto;
2034 			} else {
2035 				to_ticks = net->RTO;
2036 			}
2037 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2038 			jitter = rndval % to_ticks;
2039 			if (jitter >= (to_ticks >> 1)) {
2040 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2041 			} else {
2042 				to_ticks = to_ticks - jitter;
2043 			}
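			/*
			 * At this point to_ticks holds the RTO adjusted by a
			 * jitter of at most half the RTO in either direction,
			 * e.g. an RTO of 1000 ms yields a value roughly in
			 * the range of 500 to 1500 ms.
			 */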
2044 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2045 			    !(net->dest_state & SCTP_ADDR_PF)) {
2046 				to_ticks += net->heart_beat_delay;
2047 			}
2048 			/*
2049 			 * Now we must convert to_ticks, which is currently in
2050 			 * ms, into ticks.
2051 			 */
2052 			to_ticks = MSEC_TO_TICKS(to_ticks);
2053 			tmr = &net->hb_timer;
2054 		}
2055 		break;
2056 	case SCTP_TIMER_TYPE_COOKIE:
2057 		/*
2058 		 * Here we can use the RTO timer from the network since one
2059 		 * RTT was complete. If a retransmission happened then we will
2060 		 * be using the initial RTO value.
2061 		 */
2062 		if ((stcb == NULL) || (net == NULL)) {
2063 			return;
2064 		}
2065 		if (net->RTO == 0) {
2066 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2067 		} else {
2068 			to_ticks = MSEC_TO_TICKS(net->RTO);
2069 		}
2070 		tmr = &net->rxt_timer;
2071 		break;
2072 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2073 		/*
2074 		 * Nothing needed but the endpoint here, usually about 60
2075 		 * minutes.
2076 		 */
2077 		tmr = &inp->sctp_ep.signature_change;
2078 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2079 		break;
2080 	case SCTP_TIMER_TYPE_ASOCKILL:
2081 		if (stcb == NULL) {
2082 			return;
2083 		}
2084 		tmr = &stcb->asoc.strreset_timer;
2085 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2086 		break;
2087 	case SCTP_TIMER_TYPE_INPKILL:
2088 		/*
2089 		 * The inp is set up to die. We re-use the signature_change
2090 		 * timer since that has stopped and we are in the GONE
2091 		 * state.
2092 		 */
2093 		tmr = &inp->sctp_ep.signature_change;
2094 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2095 		break;
2096 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2097 		/*
2098 		 * Here we use the value found in the EP for PMTU, usually
2099 		 * about 10 minutes.
2100 		 */
2101 		if ((stcb == NULL) || (net == NULL)) {
2102 			return;
2103 		}
2104 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2105 			return;
2106 		}
2107 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2108 		tmr = &net->pmtu_timer;
2109 		break;
2110 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2111 		/* Here we use the RTO of the destination */
2112 		if ((stcb == NULL) || (net == NULL)) {
2113 			return;
2114 		}
2115 		if (net->RTO == 0) {
2116 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2117 		} else {
2118 			to_ticks = MSEC_TO_TICKS(net->RTO);
2119 		}
2120 		tmr = &net->rxt_timer;
2121 		break;
2122 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2123 		/*
2124 		 * Here we use the endpoint's shutdown guard timer, usually
2125 		 * about 3 minutes.
2126 		 */
2127 		if (stcb == NULL) {
2128 			return;
2129 		}
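		/*
		 * If no administrative value is configured, fall back to the
		 * RFC 4960 recommendation of five times RTO.Max.
		 */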
2130 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2131 			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2132 		} else {
2133 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2134 		}
2135 		tmr = &stcb->asoc.shut_guard_timer;
2136 		break;
2137 	case SCTP_TIMER_TYPE_STRRESET:
2138 		/*
2139 		 * Here the timer comes from the stcb but its value is from
2140 		 * the net's RTO.
2141 		 */
2142 		if ((stcb == NULL) || (net == NULL)) {
2143 			return;
2144 		}
2145 		if (net->RTO == 0) {
2146 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2147 		} else {
2148 			to_ticks = MSEC_TO_TICKS(net->RTO);
2149 		}
2150 		tmr = &stcb->asoc.strreset_timer;
2151 		break;
2152 	case SCTP_TIMER_TYPE_ASCONF:
2153 		/*
2154 		 * Here the timer comes from the stcb but its value is from
2155 		 * the net's RTO.
2156 		 */
2157 		if ((stcb == NULL) || (net == NULL)) {
2158 			return;
2159 		}
2160 		if (net->RTO == 0) {
2161 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2162 		} else {
2163 			to_ticks = MSEC_TO_TICKS(net->RTO);
2164 		}
2165 		tmr = &stcb->asoc.asconf_timer;
2166 		break;
2167 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2168 		if ((stcb == NULL) || (net != NULL)) {
2169 			return;
2170 		}
2171 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2172 		tmr = &stcb->asoc.delete_prim_timer;
2173 		break;
2174 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2175 		if (stcb == NULL) {
2176 			return;
2177 		}
2178 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2179 			/*
2180 			 * Really an error since stcb is NOT set to
2181 			 * autoclose
2182 			 */
2183 			return;
2184 		}
2185 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2186 		tmr = &stcb->asoc.autoclose_timer;
2187 		break;
2188 	default:
2189 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2190 		    __func__, t_type);
2191 		return;
2192 		break;
2193 	}
2194 	if ((to_ticks <= 0) || (tmr == NULL)) {
2195 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2196 		    __func__, t_type, to_ticks, (void *)tmr);
2197 		return;
2198 	}
2199 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2200 		/*
2201 		 * We do NOT allow a timer to be started while it is already
2202 		 * running; if it is, we leave the current one up unchanged.
2203 		 */
2204 		return;
2205 	}
2206 	/* At this point we can proceed */
2207 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2208 		stcb->asoc.num_send_timers_up++;
2209 	}
2210 	tmr->stopped_from = 0;
2211 	tmr->type = t_type;
2212 	tmr->ep = (void *)inp;
2213 	tmr->tcb = (void *)stcb;
2214 	tmr->net = (void *)net;
2215 	tmr->self = (void *)tmr;
2216 	tmr->vnet = (void *)curvnet;
2217 	tmr->ticks = sctp_get_tick_count();
2218 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2219 	return;
2220 }
2221 
2222 void
2223 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2224     struct sctp_nets *net, uint32_t from)
2225 {
2226 	struct sctp_timer *tmr;
2227 
2228 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2229 	    (inp == NULL))
2230 		return;
2231 
2232 	tmr = NULL;
2233 	if (stcb) {
2234 		SCTP_TCB_LOCK_ASSERT(stcb);
2235 	}
2236 	switch (t_type) {
2237 	case SCTP_TIMER_TYPE_ADDR_WQ:
2238 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2239 		break;
2240 	case SCTP_TIMER_TYPE_SEND:
2241 		if ((stcb == NULL) || (net == NULL)) {
2242 			return;
2243 		}
2244 		tmr = &net->rxt_timer;
2245 		break;
2246 	case SCTP_TIMER_TYPE_INIT:
2247 		if ((stcb == NULL) || (net == NULL)) {
2248 			return;
2249 		}
2250 		tmr = &net->rxt_timer;
2251 		break;
2252 	case SCTP_TIMER_TYPE_RECV:
2253 		if (stcb == NULL) {
2254 			return;
2255 		}
2256 		tmr = &stcb->asoc.dack_timer;
2257 		break;
2258 	case SCTP_TIMER_TYPE_SHUTDOWN:
2259 		if ((stcb == NULL) || (net == NULL)) {
2260 			return;
2261 		}
2262 		tmr = &net->rxt_timer;
2263 		break;
2264 	case SCTP_TIMER_TYPE_HEARTBEAT:
2265 		if ((stcb == NULL) || (net == NULL)) {
2266 			return;
2267 		}
2268 		tmr = &net->hb_timer;
2269 		break;
2270 	case SCTP_TIMER_TYPE_COOKIE:
2271 		if ((stcb == NULL) || (net == NULL)) {
2272 			return;
2273 		}
2274 		tmr = &net->rxt_timer;
2275 		break;
2276 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2277 		/* nothing needed but the endpoint here */
2278 		tmr = &inp->sctp_ep.signature_change;
2279 		/*
2280 		 * We re-use the newcookie timer for the INP kill timer. We
2281 		 * must ensure that we do not stop it by accident.
2282 		 */
2283 		break;
2284 	case SCTP_TIMER_TYPE_ASOCKILL:
2285 		/*
2286 		 * Stop the asoc kill timer.
2287 		 */
2288 		if (stcb == NULL) {
2289 			return;
2290 		}
2291 		tmr = &stcb->asoc.strreset_timer;
2292 		break;
2293 
2294 	case SCTP_TIMER_TYPE_INPKILL:
2295 		/*
2296 		 * The inp is set up to die. We re-use the signature_change
2297 		 * timer since that has stopped and we are in the GONE
2298 		 * state.
2299 		 */
2300 		tmr = &inp->sctp_ep.signature_change;
2301 		break;
2302 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2303 		if ((stcb == NULL) || (net == NULL)) {
2304 			return;
2305 		}
2306 		tmr = &net->pmtu_timer;
2307 		break;
2308 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2309 		if ((stcb == NULL) || (net == NULL)) {
2310 			return;
2311 		}
2312 		tmr = &net->rxt_timer;
2313 		break;
2314 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2315 		if (stcb == NULL) {
2316 			return;
2317 		}
2318 		tmr = &stcb->asoc.shut_guard_timer;
2319 		break;
2320 	case SCTP_TIMER_TYPE_STRRESET:
2321 		if (stcb == NULL) {
2322 			return;
2323 		}
2324 		tmr = &stcb->asoc.strreset_timer;
2325 		break;
2326 	case SCTP_TIMER_TYPE_ASCONF:
2327 		if (stcb == NULL) {
2328 			return;
2329 		}
2330 		tmr = &stcb->asoc.asconf_timer;
2331 		break;
2332 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2333 		if (stcb == NULL) {
2334 			return;
2335 		}
2336 		tmr = &stcb->asoc.delete_prim_timer;
2337 		break;
2338 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2339 		if (stcb == NULL) {
2340 			return;
2341 		}
2342 		tmr = &stcb->asoc.autoclose_timer;
2343 		break;
2344 	default:
2345 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2346 		    __func__, t_type);
2347 		break;
2348 	}
2349 	if (tmr == NULL) {
2350 		return;
2351 	}
2352 	if ((tmr->type != t_type) && tmr->type) {
2353 		/*
2354 		 * Ok, we have a timer that is under joint use, for example
2355 		 * the cookie timer sharing state with the SEND timer. We are
2356 		 * therefore NOT running the timer that the caller wants
2357 		 * stopped, so just return.
2358 		 */
2359 		return;
2360 	}
2361 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2362 		stcb->asoc.num_send_timers_up--;
2363 		if (stcb->asoc.num_send_timers_up < 0) {
2364 			stcb->asoc.num_send_timers_up = 0;
2365 		}
2366 	}
2367 	tmr->self = NULL;
2368 	tmr->stopped_from = from;
2369 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2370 	return;
2371 }
2372 
2373 uint32_t
2374 sctp_calculate_len(struct mbuf *m)
2375 {
2376 	uint32_t tlen = 0;
2377 	struct mbuf *at;
2378 
2379 	at = m;
2380 	while (at) {
2381 		tlen += SCTP_BUF_LEN(at);
2382 		at = SCTP_BUF_NEXT(at);
2383 	}
2384 	return (tlen);
2385 }
2386 
2387 void
2388 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2389     struct sctp_association *asoc, uint32_t mtu)
2390 {
2391 	/*
2392 	 * Reset the P-MTU size on this association. This involves changing
2393 	 * the asoc MTU and going through ANY chunk+overhead larger than mtu
2394 	 * to allow the DF flag to be cleared.
2395 	 */
2396 	struct sctp_tmit_chunk *chk;
2397 	unsigned int eff_mtu, ovh;
2398 
2399 	asoc->smallest_mtu = mtu;
2400 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2401 		ovh = SCTP_MIN_OVERHEAD;
2402 	} else {
2403 		ovh = SCTP_MIN_V4_OVERHEAD;
2404 	}
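	/*
	 * ovh is the fixed per-packet IPv6 or IPv4 plus common SCTP header
	 * overhead; chunks larger than the resulting effective MTU are
	 * flagged below so that IP fragmentation is permitted for them.
	 */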
2405 	eff_mtu = mtu - ovh;
2406 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2407 		if (chk->send_size > eff_mtu) {
2408 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2409 		}
2410 	}
2411 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2412 		if (chk->send_size > eff_mtu) {
2413 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2414 		}
2415 	}
2416 }
2417 
2418 
2419 /*
2420  * Given an association and the starting time of the current RTT period,
2421  * return the RTO in number of msecs. net should point to the current network.
2422  */
2423 
2424 uint32_t
2425 sctp_calculate_rto(struct sctp_tcb *stcb,
2426     struct sctp_association *asoc,
2427     struct sctp_nets *net,
2428     struct timeval *old,
2429     int rtt_from_sack)
2430 {
2431 	/*-
2432 	 * Given an association and the starting time of the current RTT
2433 	 * period (in *old), return the RTO in number of msecs.
2434 	 */
2435 	int32_t rtt;		/* RTT in ms */
2436 	uint32_t new_rto;
2437 	int first_measure = 0;
2438 	struct timeval now;
2439 
2440 	/************************/
2441 	/* 1. calculate new RTT */
2442 	/************************/
2443 	/* get the current time */
2444 	if (stcb->asoc.use_precise_time) {
2445 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2446 	} else {
2447 		(void)SCTP_GETTIME_TIMEVAL(&now);
2448 	}
2449 	timevalsub(&now, old);
2450 	/* store the current RTT in us */
2451 	net->rtt = (uint64_t)1000000 * (uint64_t)now.tv_sec +
2452 	    (uint64_t)now.tv_usec;
2453 
2454 	/* compute rtt in ms */
2455 	rtt = (int32_t)(net->rtt / 1000);
2456 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2457 		/*
2458 		 * Tell the CC module that a new update has just occurred
2459 		 * from a sack
2460 		 */
2461 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2462 	}
2463 	/*
2464 	 * Do we need to determine the LAN type? We do this only on SACKs,
2465 	 * i.e. when the RTT is derived from data, not non-data (HB/INIT->INIT-ACK).
2466 	 */
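	/*
	 * The path is classified only once, while lan_type is still unknown,
	 * by comparing the measured RTT in microseconds against
	 * SCTP_LOCAL_LAN_RTT.
	 */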
2467 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2468 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2469 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2470 			net->lan_type = SCTP_LAN_INTERNET;
2471 		} else {
2472 			net->lan_type = SCTP_LAN_LOCAL;
2473 		}
2474 	}
2475 	/***************************/
2476 	/* 2. update RTTVAR & SRTT */
2477 	/***************************/
2478 	/*-
2479 	 * Compute the scaled average lastsa and the
2480 	 * scaled variance lastsv as described in Van Jacobson's
2481 	 * paper "Congestion Avoidance and Control", Annex A.
2482 	 *
2483 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2484 	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
2485 	 */
2486 	if (net->RTO_measured) {
2487 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2488 		net->lastsa += rtt;
2489 		if (rtt < 0) {
2490 			rtt = -rtt;
2491 		}
2492 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2493 		net->lastsv += rtt;
2494 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2495 			rto_logging(net, SCTP_LOG_RTTVAR);
2496 		}
2497 	} else {
2498 		/* First RTO measurement */
2499 		net->RTO_measured = 1;
2500 		first_measure = 1;
2501 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2502 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2503 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2504 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2505 		}
2506 	}
2507 	if (net->lastsv == 0) {
2508 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2509 	}
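	/*
	 * With the default shift values (SCTP_RTT_SHIFT == 3,
	 * SCTP_RTT_VAR_SHIFT == 2) lastsa holds 8 * SRTT and lastsv holds
	 * 4 * RTTVAR, so the line below is the familiar
	 * RTO = SRTT + 4 * RTTVAR of RFC 4960 / RFC 6298.
	 */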
2510 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
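	/*
	 * Paths whose RTO exceeds SCTP_SAT_NETWORK_MIN are flagged as
	 * satellite-like; once a later (non-first) measurement falls back
	 * below the threshold, the flag is cleared and locked out so it
	 * cannot flap.
	 */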
2511 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2512 	    (stcb->asoc.sat_network_lockout == 0)) {
2513 		stcb->asoc.sat_network = 1;
2514 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2515 		stcb->asoc.sat_network = 0;
2516 		stcb->asoc.sat_network_lockout = 1;
2517 	}
2518 	/* bound it, per C6/C7 in Section 5.3.1 */
2519 	if (new_rto < stcb->asoc.minrto) {
2520 		new_rto = stcb->asoc.minrto;
2521 	}
2522 	if (new_rto > stcb->asoc.maxrto) {
2523 		new_rto = stcb->asoc.maxrto;
2524 	}
2525 	/* we are now returning the RTO */
2526 	return (new_rto);
2527 }
2528 
2529 /*
2530  * Return a pointer to a contiguous piece of data from the given mbuf chain,
2531  * starting at 'off', for 'len' bytes. If the desired piece spans more than
2532  * one mbuf, a copy is made into 'in_ptr'. The caller must ensure the buffer
2533  * is at least 'len' bytes long; returns NULL if there aren't 'len' bytes in the chain.
2534  */
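/*
 * A minimal usage sketch (the variable names are illustrative only): pull a
 * parameter header that may straddle mbufs into a caller-supplied buffer.
 *
 *	struct sctp_paramhdr buf, *ph;
 *
 *	ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset,
 *	    sizeof(struct sctp_paramhdr), (uint8_t *)&buf);
 *	if (ph == NULL)
 *		return;		(fewer than 'len' bytes in the chain)
 */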
2535 caddr_t
2536 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
2537 {
2538 	uint32_t count;
2539 	uint8_t *ptr;
2540 
2541 	ptr = in_ptr;
2542 	if ((off < 0) || (len <= 0))
2543 		return (NULL);
2544 
2545 	/* find the desired start location */
2546 	while ((m != NULL) && (off > 0)) {
2547 		if (off < SCTP_BUF_LEN(m))
2548 			break;
2549 		off -= SCTP_BUF_LEN(m);
2550 		m = SCTP_BUF_NEXT(m);
2551 	}
2552 	if (m == NULL)
2553 		return (NULL);
2554 
2555 	/* is the current mbuf large enough (i.e. contiguous)? */
2556 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2557 		return (mtod(m, caddr_t)+off);
2558 	} else {
2559 		/* else, it spans more than one mbuf, so save a temp copy... */
2560 		while ((m != NULL) && (len > 0)) {
2561 			count = min(SCTP_BUF_LEN(m) - off, len);
2562 			memcpy(ptr, mtod(m, caddr_t)+off, count);
2563 			len -= count;
2564 			ptr += count;
2565 			off = 0;
2566 			m = SCTP_BUF_NEXT(m);
2567 		}
2568 		if ((m == NULL) && (len > 0))
2569 			return (NULL);
2570 		else
2571 			return ((caddr_t)in_ptr);
2572 	}
2573 }
2574 
2575 
2576 
2577 struct sctp_paramhdr *
2578 sctp_get_next_param(struct mbuf *m,
2579     int offset,
2580     struct sctp_paramhdr *pull,
2581     int pull_limit)
2582 {
2583 	/* This just provides a typed signature to Peter's Pull routine */
2584 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2585 	    (uint8_t *)pull));
2586 }
2587 
2588 
2589 struct mbuf *
2590 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2591 {
2592 	struct mbuf *m_last;
2593 	caddr_t dp;
2594 
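	/*
	 * SCTP chunks are padded to a 4-byte boundary, so at most three pad
	 * bytes are ever needed; anything larger is a caller error.
	 */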
2595 	if (padlen > 3) {
2596 		return (NULL);
2597 	}
2598 	if (padlen <= M_TRAILINGSPACE(m)) {
2599 		/*
2600 		 * The easy way. We hope the majority of the time we hit
2601 		 * here :)
2602 		 */
2603 		m_last = m;
2604 	} else {
2605 		/* Hard way we must grow the mbuf chain */
2606 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2607 		if (m_last == NULL) {
2608 			return (NULL);
2609 		}
2610 		SCTP_BUF_LEN(m_last) = 0;
2611 		SCTP_BUF_NEXT(m_last) = NULL;
2612 		SCTP_BUF_NEXT(m) = m_last;
2613 	}
2614 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2615 	SCTP_BUF_LEN(m_last) += padlen;
2616 	memset(dp, 0, padlen);
2617 	return (m_last);
2618 }
2619 
2620 struct mbuf *
2621 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2622 {
2623 	/* find the last mbuf in chain and pad it */
2624 	struct mbuf *m_at;
2625 
2626 	if (last_mbuf != NULL) {
2627 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2628 	} else {
2629 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2630 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2631 				return (sctp_add_pad_tombuf(m_at, padval));
2632 			}
2633 		}
2634 	}
2635 	return (NULL);
2636 }
2637 
2638 static void
2639 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2640     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2641 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2642     SCTP_UNUSED
2643 #endif
2644 )
2645 {
2646 	struct mbuf *m_notify;
2647 	struct sctp_assoc_change *sac;
2648 	struct sctp_queued_to_read *control;
2649 	unsigned int notif_len;
2650 	uint16_t abort_len;
2651 	unsigned int i;
2652 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2653 	struct socket *so;
2654 #endif
2655 
2656 	if (stcb == NULL) {
2657 		return;
2658 	}
2659 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2660 		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2661 		if (abort != NULL) {
2662 			abort_len = ntohs(abort->ch.chunk_length);
2663 		} else {
2664 			abort_len = 0;
2665 		}
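		/*
		 * COMM_UP/RESTART notifications carry one byte of sac_info
		 * per supported feature, while COMM_LOST/CANT_STR_ASSOC
		 * carry the received ABORT chunk, if any.
		 */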
2666 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2667 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2668 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2669 			notif_len += abort_len;
2670 		}
2671 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2672 		if (m_notify == NULL) {
2673 			/* Retry with smaller value. */
2674 			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2675 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2676 			if (m_notify == NULL) {
2677 				goto set_error;
2678 			}
2679 		}
2680 		SCTP_BUF_NEXT(m_notify) = NULL;
2681 		sac = mtod(m_notify, struct sctp_assoc_change *);
2682 		memset(sac, 0, notif_len);
2683 		sac->sac_type = SCTP_ASSOC_CHANGE;
2684 		sac->sac_flags = 0;
2685 		sac->sac_length = sizeof(struct sctp_assoc_change);
2686 		sac->sac_state = state;
2687 		sac->sac_error = error;
2688 		/* XXX verify these stream counts */
2689 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2690 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2691 		sac->sac_assoc_id = sctp_get_associd(stcb);
2692 		if (notif_len > sizeof(struct sctp_assoc_change)) {
2693 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2694 				i = 0;
2695 				if (stcb->asoc.prsctp_supported == 1) {
2696 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2697 				}
2698 				if (stcb->asoc.auth_supported == 1) {
2699 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2700 				}
2701 				if (stcb->asoc.asconf_supported == 1) {
2702 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2703 				}
2704 				if (stcb->asoc.idata_supported == 1) {
2705 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
2706 				}
2707 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2708 				if (stcb->asoc.reconfig_supported == 1) {
2709 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2710 				}
2711 				sac->sac_length += i;
2712 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2713 				memcpy(sac->sac_info, abort, abort_len);
2714 				sac->sac_length += abort_len;
2715 			}
2716 		}
2717 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2718 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2719 		    0, 0, stcb->asoc.context, 0, 0, 0,
2720 		    m_notify);
2721 		if (control != NULL) {
2722 			control->length = SCTP_BUF_LEN(m_notify);
2723 			control->spec_flags = M_NOTIFICATION;
2724 			/* not that we need this */
2725 			control->tail_mbuf = m_notify;
2726 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2727 			    control,
2728 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2729 			    so_locked);
2730 		} else {
2731 			sctp_m_freem(m_notify);
2732 		}
2733 	}
2734 	/*
2735 	 * For 1-to-1 style sockets, we send up an error when an ABORT
2736 	 * comes in.
2737 	 */
2738 set_error:
2739 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2740 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2741 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2742 		SOCK_LOCK(stcb->sctp_socket);
2743 		if (from_peer) {
2744 			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2745 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2746 				stcb->sctp_socket->so_error = ECONNREFUSED;
2747 			} else {
2748 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2749 				stcb->sctp_socket->so_error = ECONNRESET;
2750 			}
2751 		} else {
2752 			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
2753 			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
2754 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
2755 				stcb->sctp_socket->so_error = ETIMEDOUT;
2756 			} else {
2757 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2758 				stcb->sctp_socket->so_error = ECONNABORTED;
2759 			}
2760 		}
2761 		SOCK_UNLOCK(stcb->sctp_socket);
2762 	}
2763 	/* Wake ANY sleepers */
2764 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2765 	so = SCTP_INP_SO(stcb->sctp_ep);
2766 	if (!so_locked) {
2767 		atomic_add_int(&stcb->asoc.refcnt, 1);
2768 		SCTP_TCB_UNLOCK(stcb);
2769 		SCTP_SOCKET_LOCK(so, 1);
2770 		SCTP_TCB_LOCK(stcb);
2771 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2772 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2773 			SCTP_SOCKET_UNLOCK(so, 1);
2774 			return;
2775 		}
2776 	}
2777 #endif
2778 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2779 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2780 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2781 		socantrcvmore(stcb->sctp_socket);
2782 	}
2783 	sorwakeup(stcb->sctp_socket);
2784 	sowwakeup(stcb->sctp_socket);
2785 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2786 	if (!so_locked) {
2787 		SCTP_SOCKET_UNLOCK(so, 1);
2788 	}
2789 #endif
2790 }
2791 
2792 static void
2793 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2794     struct sockaddr *sa, uint32_t error, int so_locked
2795 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2796     SCTP_UNUSED
2797 #endif
2798 )
2799 {
2800 	struct mbuf *m_notify;
2801 	struct sctp_paddr_change *spc;
2802 	struct sctp_queued_to_read *control;
2803 
2804 	if ((stcb == NULL) ||
2805 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2806 		/* event not enabled */
2807 		return;
2808 	}
2809 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2810 	if (m_notify == NULL)
2811 		return;
2812 	SCTP_BUF_LEN(m_notify) = 0;
2813 	spc = mtod(m_notify, struct sctp_paddr_change *);
2814 	memset(spc, 0, sizeof(struct sctp_paddr_change));
2815 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2816 	spc->spc_flags = 0;
2817 	spc->spc_length = sizeof(struct sctp_paddr_change);
2818 	switch (sa->sa_family) {
2819 #ifdef INET
2820 	case AF_INET:
2821 #ifdef INET6
2822 		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
2823 			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
2824 			    (struct sockaddr_in6 *)&spc->spc_aaddr);
2825 		} else {
2826 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2827 		}
2828 #else
2829 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2830 #endif
2831 		break;
2832 #endif
2833 #ifdef INET6
2834 	case AF_INET6:
2835 		{
2836 			struct sockaddr_in6 *sin6;
2837 
2838 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2839 
2840 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2841 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2842 				if (sin6->sin6_scope_id == 0) {
2843 					/* recover scope_id for user */
2844 					(void)sa6_recoverscope(sin6);
2845 				} else {
2846 					/* clear embedded scope_id for user */
2847 					in6_clearscope(&sin6->sin6_addr);
2848 				}
2849 			}
2850 			break;
2851 		}
2852 #endif
2853 	default:
2854 		/* TSNH */
2855 		break;
2856 	}
2857 	spc->spc_state = state;
2858 	spc->spc_error = error;
2859 	spc->spc_assoc_id = sctp_get_associd(stcb);
2860 
2861 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2862 	SCTP_BUF_NEXT(m_notify) = NULL;
2863 
2864 	/* append to socket */
2865 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2866 	    0, 0, stcb->asoc.context, 0, 0, 0,
2867 	    m_notify);
2868 	if (control == NULL) {
2869 		/* no memory */
2870 		sctp_m_freem(m_notify);
2871 		return;
2872 	}
2873 	control->length = SCTP_BUF_LEN(m_notify);
2874 	control->spec_flags = M_NOTIFICATION;
2875 	/* not that we need this */
2876 	control->tail_mbuf = m_notify;
2877 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2878 	    control,
2879 	    &stcb->sctp_socket->so_rcv, 1,
2880 	    SCTP_READ_LOCK_NOT_HELD,
2881 	    so_locked);
2882 }
2883 
2884 
2885 static void
2886 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2887     struct sctp_tmit_chunk *chk, int so_locked
2888 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2889     SCTP_UNUSED
2890 #endif
2891 )
2892 {
2893 	struct mbuf *m_notify;
2894 	struct sctp_send_failed *ssf;
2895 	struct sctp_send_failed_event *ssfe;
2896 	struct sctp_queued_to_read *control;
2897 	struct sctp_chunkhdr *chkhdr;
2898 	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;
2899 
2900 	if ((stcb == NULL) ||
2901 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2902 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2903 		/* event not enabled */
2904 		return;
2905 	}
2906 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2907 		notifhdr_len = sizeof(struct sctp_send_failed_event);
2908 	} else {
2909 		notifhdr_len = sizeof(struct sctp_send_failed);
2910 	}
2911 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
2912 	if (m_notify == NULL)
2913 		/* no space left */
2914 		return;
2915 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
2916 	if (stcb->asoc.idata_supported) {
2917 		chkhdr_len = sizeof(struct sctp_idata_chunk);
2918 	} else {
2919 		chkhdr_len = sizeof(struct sctp_data_chunk);
2920 	}
2921 	/* Use some defaults in case we can't access the chunk header */
2922 	if (chk->send_size >= chkhdr_len) {
2923 		payload_len = chk->send_size - chkhdr_len;
2924 	} else {
2925 		payload_len = 0;
2926 	}
2927 	padding_len = 0;
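	/*
	 * If the chunk header is readable, refine the numbers: chunk_length
	 * excludes padding, so a remainder of up to three bytes between
	 * send_size and chunk_length is DATA chunk padding.
	 */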
2928 	if (chk->data != NULL) {
2929 		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
2930 		if (chkhdr != NULL) {
2931 			chk_len = ntohs(chkhdr->chunk_length);
2932 			if ((chk_len >= chkhdr_len) &&
2933 			    (chk->send_size >= chk_len) &&
2934 			    (chk->send_size - chk_len < 4)) {
2935 				padding_len = chk->send_size - chk_len;
2936 				payload_len = chk->send_size - chkhdr_len - padding_len;
2937 			}
2938 		}
2939 	}
2940 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2941 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2942 		memset(ssfe, 0, notifhdr_len);
2943 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2944 		if (sent) {
2945 			ssfe->ssfe_flags = SCTP_DATA_SENT;
2946 		} else {
2947 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2948 		}
2949 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
2950 		ssfe->ssfe_error = error;
2951 		/* not exactly what the user sent in, but should be close :) */
2952 		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
2953 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2954 		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
2955 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2956 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2957 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2958 	} else {
2959 		ssf = mtod(m_notify, struct sctp_send_failed *);
2960 		memset(ssf, 0, notifhdr_len);
2961 		ssf->ssf_type = SCTP_SEND_FAILED;
2962 		if (sent) {
2963 			ssf->ssf_flags = SCTP_DATA_SENT;
2964 		} else {
2965 			ssf->ssf_flags = SCTP_DATA_UNSENT;
2966 		}
2967 		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
2968 		ssf->ssf_error = error;
2969 		/* not exactly what the user sent in, but should be close :) */
2970 		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
2971 		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
2972 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
2973 		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
2974 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
2975 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2976 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
2977 	}
2978 	if (chk->data != NULL) {
2979 		/* Trim off the sctp chunk header (it should be there) */
2980 		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
2981 			m_adj(chk->data, chkhdr_len);
2982 			m_adj(chk->data, -padding_len);
2983 			sctp_mbuf_crush(chk->data);
2984 			chk->send_size -= (chkhdr_len + padding_len);
2985 		}
2986 	}
2987 	SCTP_BUF_NEXT(m_notify) = chk->data;
2988 	/* Steal off the mbuf */
2989 	chk->data = NULL;
2990 	/*
2991 	 * For this case, we check the actual socket buffer: since the assoc
2992 	 * is going away, we don't want to overfill the socket buffer for a
2993 	 * non-reader.
2994 	 */
2995 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
2996 		sctp_m_freem(m_notify);
2997 		return;
2998 	}
2999 	/* append to socket */
3000 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3001 	    0, 0, stcb->asoc.context, 0, 0, 0,
3002 	    m_notify);
3003 	if (control == NULL) {
3004 		/* no memory */
3005 		sctp_m_freem(m_notify);
3006 		return;
3007 	}
3008 	control->length = SCTP_BUF_LEN(m_notify);
3009 	control->spec_flags = M_NOTIFICATION;
3010 	/* not that we need this */
3011 	control->tail_mbuf = m_notify;
3012 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3013 	    control,
3014 	    &stcb->sctp_socket->so_rcv, 1,
3015 	    SCTP_READ_LOCK_NOT_HELD,
3016 	    so_locked);
3017 }
3018 
3019 
3020 static void
3021 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3022     struct sctp_stream_queue_pending *sp, int so_locked
3023 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3024     SCTP_UNUSED
3025 #endif
3026 )
3027 {
3028 	struct mbuf *m_notify;
3029 	struct sctp_send_failed *ssf;
3030 	struct sctp_send_failed_event *ssfe;
3031 	struct sctp_queued_to_read *control;
3032 	int notifhdr_len;
3033 
3034 	if ((stcb == NULL) ||
3035 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3036 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3037 		/* event not enabled */
3038 		return;
3039 	}
3040 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3041 		notifhdr_len = sizeof(struct sctp_send_failed_event);
3042 	} else {
3043 		notifhdr_len = sizeof(struct sctp_send_failed);
3044 	}
3045 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3046 	if (m_notify == NULL) {
3047 		/* no space left */
3048 		return;
3049 	}
3050 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3051 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3052 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3053 		memset(ssfe, 0, notifhdr_len);
3054 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3055 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3056 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
3057 		ssfe->ssfe_error = error;
3058 		/* not exactly what the user sent in, but should be close :) */
3059 		ssfe->ssfe_info.snd_sid = sp->sid;
3060 		if (sp->some_taken) {
3061 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3062 		} else {
3063 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3064 		}
3065 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3066 		ssfe->ssfe_info.snd_context = sp->context;
3067 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3068 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3069 	} else {
3070 		ssf = mtod(m_notify, struct sctp_send_failed *);
3071 		memset(ssf, 0, notifhdr_len);
3072 		ssf->ssf_type = SCTP_SEND_FAILED;
3073 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3074 		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
3075 		ssf->ssf_error = error;
3076 		/* not exactly what the user sent in, but should be close :) */
3077 		ssf->ssf_info.sinfo_stream = sp->sid;
3078 		ssf->ssf_info.sinfo_ssn = 0;
3079 		if (sp->some_taken) {
3080 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3081 		} else {
3082 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3083 		}
3084 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3085 		ssf->ssf_info.sinfo_context = sp->context;
3086 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3087 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3088 	}
3089 	SCTP_BUF_NEXT(m_notify) = sp->data;
3090 
3091 	/* Steal off the mbuf */
3092 	sp->data = NULL;
3093 	/*
3094 	 * For this case, we check the actual socket buffer: since the assoc
3095 	 * is going away, we don't want to overfill the socket buffer for a
3096 	 * non-reader.
3097 	 */
3098 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3099 		sctp_m_freem(m_notify);
3100 		return;
3101 	}
3102 	/* append to socket */
3103 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3104 	    0, 0, stcb->asoc.context, 0, 0, 0,
3105 	    m_notify);
3106 	if (control == NULL) {
3107 		/* no memory */
3108 		sctp_m_freem(m_notify);
3109 		return;
3110 	}
3111 	control->length = SCTP_BUF_LEN(m_notify);
3112 	control->spec_flags = M_NOTIFICATION;
3113 	/* not that we need this */
3114 	control->tail_mbuf = m_notify;
3115 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3116 	    control,
3117 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3118 }
3119 
3120 
3121 
3122 static void
3123 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3124 {
3125 	struct mbuf *m_notify;
3126 	struct sctp_adaptation_event *sai;
3127 	struct sctp_queued_to_read *control;
3128 
3129 	if ((stcb == NULL) ||
3130 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3131 		/* event not enabled */
3132 		return;
3133 	}
3134 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3135 	if (m_notify == NULL)
3136 		/* no space left */
3137 		return;
3138 	SCTP_BUF_LEN(m_notify) = 0;
3139 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3140 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3141 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3142 	sai->sai_flags = 0;
3143 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3144 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3145 	sai->sai_assoc_id = sctp_get_associd(stcb);
3146 
3147 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3148 	SCTP_BUF_NEXT(m_notify) = NULL;
3149 
3150 	/* append to socket */
3151 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3152 	    0, 0, stcb->asoc.context, 0, 0, 0,
3153 	    m_notify);
3154 	if (control == NULL) {
3155 		/* no memory */
3156 		sctp_m_freem(m_notify);
3157 		return;
3158 	}
3159 	control->length = SCTP_BUF_LEN(m_notify);
3160 	control->spec_flags = M_NOTIFICATION;
3161 	/* not that we need this */
3162 	control->tail_mbuf = m_notify;
3163 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3164 	    control,
3165 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3166 }
3167 
3168 /* This always must be called with the read-queue LOCKED in the INP */
3169 static void
3170 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3171     uint32_t val, int so_locked
3172 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3173     SCTP_UNUSED
3174 #endif
3175 )
3176 {
3177 	struct mbuf *m_notify;
3178 	struct sctp_pdapi_event *pdapi;
3179 	struct sctp_queued_to_read *control;
3180 	struct sockbuf *sb;
3181 
3182 	if ((stcb == NULL) ||
3183 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3184 		/* event not enabled */
3185 		return;
3186 	}
3187 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3188 		return;
3189 	}
3190 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3191 	if (m_notify == NULL)
3192 		/* no space left */
3193 		return;
3194 	SCTP_BUF_LEN(m_notify) = 0;
3195 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3196 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3197 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3198 	pdapi->pdapi_flags = 0;
3199 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3200 	pdapi->pdapi_indication = error;
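	/*
	 * val carries the stream id in its upper 16 bits and the stream
	 * sequence number in its lower 16 bits.
	 */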
3201 	pdapi->pdapi_stream = (val >> 16);
3202 	pdapi->pdapi_seq = (val & 0x0000ffff);
3203 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3204 
3205 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3206 	SCTP_BUF_NEXT(m_notify) = NULL;
3207 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3208 	    0, 0, stcb->asoc.context, 0, 0, 0,
3209 	    m_notify);
3210 	if (control == NULL) {
3211 		/* no memory */
3212 		sctp_m_freem(m_notify);
3213 		return;
3214 	}
3215 	control->length = SCTP_BUF_LEN(m_notify);
3216 	control->spec_flags = M_NOTIFICATION;
3217 	/* not that we need this */
3218 	control->tail_mbuf = m_notify;
3219 	sb = &stcb->sctp_socket->so_rcv;
3220 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3221 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3222 	}
3223 	sctp_sballoc(stcb, sb, m_notify);
3224 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3225 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3226 	}
3227 	control->end_added = 1;
3228 	if (stcb->asoc.control_pdapi)
3229 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3230 	else {
3231 		/* we really should not see this case */
3232 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3233 	}
3234 	if (stcb->sctp_ep && stcb->sctp_socket) {
3235 		/* This should always be the case */
3236 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3237 		struct socket *so;
3238 
3239 		so = SCTP_INP_SO(stcb->sctp_ep);
3240 		if (!so_locked) {
3241 			atomic_add_int(&stcb->asoc.refcnt, 1);
3242 			SCTP_TCB_UNLOCK(stcb);
3243 			SCTP_SOCKET_LOCK(so, 1);
3244 			SCTP_TCB_LOCK(stcb);
3245 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3246 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3247 				SCTP_SOCKET_UNLOCK(so, 1);
3248 				return;
3249 			}
3250 		}
3251 #endif
3252 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3253 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3254 		if (!so_locked) {
3255 			SCTP_SOCKET_UNLOCK(so, 1);
3256 		}
3257 #endif
3258 	}
3259 }
3260 
3261 static void
3262 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3263 {
3264 	struct mbuf *m_notify;
3265 	struct sctp_shutdown_event *sse;
3266 	struct sctp_queued_to_read *control;
3267 
3268 	/*
3269 	 * For the TCP model AND UDP connected sockets we will send an error
3270 	 * up when a SHUTDOWN completes.
3271 	 */
3272 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3273 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3274 		/* mark socket closed for read/write and wakeup! */
3275 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3276 		struct socket *so;
3277 
3278 		so = SCTP_INP_SO(stcb->sctp_ep);
3279 		atomic_add_int(&stcb->asoc.refcnt, 1);
3280 		SCTP_TCB_UNLOCK(stcb);
3281 		SCTP_SOCKET_LOCK(so, 1);
3282 		SCTP_TCB_LOCK(stcb);
3283 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3284 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3285 			SCTP_SOCKET_UNLOCK(so, 1);
3286 			return;
3287 		}
3288 #endif
3289 		socantsendmore(stcb->sctp_socket);
3290 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3291 		SCTP_SOCKET_UNLOCK(so, 1);
3292 #endif
3293 	}
3294 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3295 		/* event not enabled */
3296 		return;
3297 	}
3298 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3299 	if (m_notify == NULL)
3300 		/* no space left */
3301 		return;
3302 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3303 	memset(sse, 0, sizeof(struct sctp_shutdown_event));
3304 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3305 	sse->sse_flags = 0;
3306 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3307 	sse->sse_assoc_id = sctp_get_associd(stcb);
3308 
3309 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3310 	SCTP_BUF_NEXT(m_notify) = NULL;
3311 
3312 	/* append to socket */
3313 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3314 	    0, 0, stcb->asoc.context, 0, 0, 0,
3315 	    m_notify);
3316 	if (control == NULL) {
3317 		/* no memory */
3318 		sctp_m_freem(m_notify);
3319 		return;
3320 	}
3321 	control->length = SCTP_BUF_LEN(m_notify);
3322 	control->spec_flags = M_NOTIFICATION;
3323 	/* not that we need this */
3324 	control->tail_mbuf = m_notify;
3325 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3326 	    control,
3327 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3328 }
3329 
3330 static void
3331 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3332     int so_locked
3333 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3334     SCTP_UNUSED
3335 #endif
3336 )
3337 {
3338 	struct mbuf *m_notify;
3339 	struct sctp_sender_dry_event *event;
3340 	struct sctp_queued_to_read *control;
3341 
3342 	if ((stcb == NULL) ||
3343 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3344 		/* event not enabled */
3345 		return;
3346 	}
3347 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3348 	if (m_notify == NULL) {
3349 		/* no space left */
3350 		return;
3351 	}
3352 	SCTP_BUF_LEN(m_notify) = 0;
3353 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3354 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3355 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3356 	event->sender_dry_flags = 0;
3357 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3358 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3359 
3360 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3361 	SCTP_BUF_NEXT(m_notify) = NULL;
3362 
3363 	/* append to socket */
3364 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3365 	    0, 0, stcb->asoc.context, 0, 0, 0,
3366 	    m_notify);
3367 	if (control == NULL) {
3368 		/* no memory */
3369 		sctp_m_freem(m_notify);
3370 		return;
3371 	}
3372 	control->length = SCTP_BUF_LEN(m_notify);
3373 	control->spec_flags = M_NOTIFICATION;
3374 	/* not that we need this */
3375 	control->tail_mbuf = m_notify;
3376 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3377 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3378 }
3379 
3380 
3381 void
3382 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3383 {
3384 	struct mbuf *m_notify;
3385 	struct sctp_queued_to_read *control;
3386 	struct sctp_stream_change_event *stradd;
3387 
3388 	if ((stcb == NULL) ||
3389 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3390 		/* event not enabled */
3391 		return;
3392 	}
3393 	if ((stcb->asoc.peer_req_out) && flag) {
3394 		/* Peer made the request, don't tell the local user */
3395 		stcb->asoc.peer_req_out = 0;
3396 		return;
3397 	}
3398 	stcb->asoc.peer_req_out = 0;
3399 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3400 	if (m_notify == NULL)
3401 		/* no space left */
3402 		return;
3403 	SCTP_BUF_LEN(m_notify) = 0;
3404 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3405 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3406 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3407 	stradd->strchange_flags = flag;
3408 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3409 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3410 	stradd->strchange_instrms = numberin;
3411 	stradd->strchange_outstrms = numberout;
3412 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3413 	SCTP_BUF_NEXT(m_notify) = NULL;
3414 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3415 		/* no space */
3416 		sctp_m_freem(m_notify);
3417 		return;
3418 	}
3419 	/* append to socket */
3420 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3421 	    0, 0, stcb->asoc.context, 0, 0, 0,
3422 	    m_notify);
3423 	if (control == NULL) {
3424 		/* no memory */
3425 		sctp_m_freem(m_notify);
3426 		return;
3427 	}
3428 	control->length = SCTP_BUF_LEN(m_notify);
3429 	control->spec_flags = M_NOTIFICATION;
3430 	/* not that we need this */
3431 	control->tail_mbuf = m_notify;
3432 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3433 	    control,
3434 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3435 }
3436 
3437 void
3438 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3439 {
3440 	struct mbuf *m_notify;
3441 	struct sctp_queued_to_read *control;
3442 	struct sctp_assoc_reset_event *strasoc;
3443 
3444 	if ((stcb == NULL) ||
3445 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3446 		/* event not enabled */
3447 		return;
3448 	}
3449 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3450 	if (m_notify == NULL)
3451 		/* no space left */
3452 		return;
3453 	SCTP_BUF_LEN(m_notify) = 0;
3454 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3455 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3456 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3457 	strasoc->assocreset_flags = flag;
3458 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3459 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3460 	strasoc->assocreset_local_tsn = sending_tsn;
3461 	strasoc->assocreset_remote_tsn = recv_tsn;
3462 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3463 	SCTP_BUF_NEXT(m_notify) = NULL;
3464 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3465 		/* no space */
3466 		sctp_m_freem(m_notify);
3467 		return;
3468 	}
3469 	/* append to socket */
3470 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3471 	    0, 0, stcb->asoc.context, 0, 0, 0,
3472 	    m_notify);
3473 	if (control == NULL) {
3474 		/* no memory */
3475 		sctp_m_freem(m_notify);
3476 		return;
3477 	}
3478 	control->length = SCTP_BUF_LEN(m_notify);
3479 	control->spec_flags = M_NOTIFICATION;
3480 	/* not that we need this */
3481 	control->tail_mbuf = m_notify;
3482 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3483 	    control,
3484 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3485 }
3486 
3487 
3488 
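/*
 * Deliver a SCTP_STREAM_RESET_EVENT notification listing the affected
 * stream ids (converted to host byte order) together with the result flags.
 */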
3489 static void
3490 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3491     int number_entries, uint16_t *list, int flag)
3492 {
3493 	struct mbuf *m_notify;
3494 	struct sctp_queued_to_read *control;
3495 	struct sctp_stream_reset_event *strreset;
3496 	int len;
3497 
3498 	if ((stcb == NULL) ||
3499 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3500 		/* event not enabled */
3501 		return;
3502 	}
3503 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3504 	if (m_notify == NULL)
3505 		/* no space left */
3506 		return;
3507 	SCTP_BUF_LEN(m_notify) = 0;
3508 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3509 	if (len > M_TRAILINGSPACE(m_notify)) {
3510 		/* never enough room */
3511 		sctp_m_freem(m_notify);
3512 		return;
3513 	}
3514 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3515 	memset(strreset, 0, len);
3516 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3517 	strreset->strreset_flags = flag;
3518 	strreset->strreset_length = len;
3519 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3520 	if (number_entries) {
3521 		int i;
3522 
3523 		for (i = 0; i < number_entries; i++) {
3524 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3525 		}
3526 	}
3527 	SCTP_BUF_LEN(m_notify) = len;
3528 	SCTP_BUF_NEXT(m_notify) = NULL;
3529 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3530 		/* no space */
3531 		sctp_m_freem(m_notify);
3532 		return;
3533 	}
3534 	/* append to socket */
3535 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3536 	    0, 0, stcb->asoc.context, 0, 0, 0,
3537 	    m_notify);
3538 	if (control == NULL) {
3539 		/* no memory */
3540 		sctp_m_freem(m_notify);
3541 		return;
3542 	}
3543 	control->length = SCTP_BUF_LEN(m_notify);
3544 	control->spec_flags = M_NOTIFICATION;
3545 	/* not that we need this */
3546 	control->tail_mbuf = m_notify;
3547 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3548 	    control,
3549 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3550 }
3551 
3552 
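/*
 * Deliver a SCTP_REMOTE_ERROR notification. If the triggering ERROR
 * chunk is available and fits into the mbuf, a copy of it is appended
 * to the notification.
 */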
3553 static void
3554 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3555 {
3556 	struct mbuf *m_notify;
3557 	struct sctp_remote_error *sre;
3558 	struct sctp_queued_to_read *control;
3559 	unsigned int notif_len;
3560 	uint16_t chunk_len;
3561 
3562 	if ((stcb == NULL) ||
3563 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3564 		return;
3565 	}
3566 	if (chunk != NULL) {
3567 		chunk_len = ntohs(chunk->ch.chunk_length);
3568 	} else {
3569 		chunk_len = 0;
3570 	}
3571 	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
3572 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3573 	if (m_notify == NULL) {
3574 		/* Retry with smaller value. */
3575 		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
3576 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3577 		if (m_notify == NULL) {
3578 			return;
3579 		}
3580 	}
3581 	SCTP_BUF_NEXT(m_notify) = NULL;
3582 	sre = mtod(m_notify, struct sctp_remote_error *);
3583 	memset(sre, 0, notif_len);
3584 	sre->sre_type = SCTP_REMOTE_ERROR;
3585 	sre->sre_flags = 0;
3586 	sre->sre_length = sizeof(struct sctp_remote_error);
3587 	sre->sre_error = error;
3588 	sre->sre_assoc_id = sctp_get_associd(stcb);
3589 	if (notif_len > sizeof(struct sctp_remote_error)) {
3590 		memcpy(sre->sre_data, chunk, chunk_len);
3591 		sre->sre_length += chunk_len;
3592 	}
3593 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3594 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3595 	    0, 0, stcb->asoc.context, 0, 0, 0,
3596 	    m_notify);
3597 	if (control != NULL) {
3598 		control->length = SCTP_BUF_LEN(m_notify);
3599 		control->spec_flags = M_NOTIFICATION;
3600 		/* not that we need this */
3601 		control->tail_mbuf = m_notify;
3602 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3603 		    control,
3604 		    &stcb->sctp_socket->so_rcv, 1,
3605 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3606 	} else {
3607 		sctp_m_freem(m_notify);
3608 	}
3609 }
3610 
3611 
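/*
 * Central dispatcher for ULP notifications: maps the SCTP_NOTIFY_* codes
 * onto the individual sctp_notify_*() helpers above. Nothing is reported
 * if the socket is gone or can no longer receive, and interface events
 * are suppressed while the association is still being set up.
 */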
3612 void
3613 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3614     uint32_t error, void *data, int so_locked
3615 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3616     SCTP_UNUSED
3617 #endif
3618 )
3619 {
3620 	if ((stcb == NULL) ||
3621 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3622 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3623 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3624 		/* If the socket is gone we are out of here */
3625 		return;
3626 	}
3627 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3628 		return;
3629 	}
3630 	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3631 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3632 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3633 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3634 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3635 			/* Don't report these in front states */
3636 			return;
3637 		}
3638 	}
3639 	switch (notification) {
3640 	case SCTP_NOTIFY_ASSOC_UP:
3641 		if (stcb->asoc.assoc_up_sent == 0) {
3642 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3643 			stcb->asoc.assoc_up_sent = 1;
3644 		}
3645 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3646 			sctp_notify_adaptation_layer(stcb);
3647 		}
3648 		if (stcb->asoc.auth_supported == 0) {
3649 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3650 			    NULL, so_locked);
3651 		}
3652 		break;
3653 	case SCTP_NOTIFY_ASSOC_DOWN:
3654 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3655 		break;
3656 	case SCTP_NOTIFY_INTERFACE_DOWN:
3657 		{
3658 			struct sctp_nets *net;
3659 
3660 			net = (struct sctp_nets *)data;
3661 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3662 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3663 			break;
3664 		}
3665 	case SCTP_NOTIFY_INTERFACE_UP:
3666 		{
3667 			struct sctp_nets *net;
3668 
3669 			net = (struct sctp_nets *)data;
3670 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3671 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3672 			break;
3673 		}
3674 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3675 		{
3676 			struct sctp_nets *net;
3677 
3678 			net = (struct sctp_nets *)data;
3679 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3680 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3681 			break;
3682 		}
3683 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3684 		sctp_notify_send_failed2(stcb, error,
3685 		    (struct sctp_stream_queue_pending *)data, so_locked);
3686 		break;
3687 	case SCTP_NOTIFY_SENT_DG_FAIL:
3688 		sctp_notify_send_failed(stcb, 1, error,
3689 		    (struct sctp_tmit_chunk *)data, so_locked);
3690 		break;
3691 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3692 		sctp_notify_send_failed(stcb, 0, error,
3693 		    (struct sctp_tmit_chunk *)data, so_locked);
3694 		break;
3695 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3696 		{
3697 			uint32_t val;
3698 
3699 			val = *((uint32_t *)data);
3700 
3701 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3702 			break;
3703 		}
3704 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3705 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3706 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3707 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3708 		} else {
3709 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3710 		}
3711 		break;
3712 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3713 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3714 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3715 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3716 		} else {
3717 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3718 		}
3719 		break;
3720 	case SCTP_NOTIFY_ASSOC_RESTART:
3721 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3722 		if (stcb->asoc.auth_supported == 0) {
3723 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3724 			    NULL, so_locked);
3725 		}
3726 		break;
3727 	case SCTP_NOTIFY_STR_RESET_SEND:
3728 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
3729 		break;
3730 	case SCTP_NOTIFY_STR_RESET_RECV:
3731 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
3732 		break;
3733 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3734 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3735 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3736 		break;
3737 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3738 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3739 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3740 		break;
3741 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3742 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3743 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3744 		break;
3745 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3746 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3747 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3748 		break;
3749 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3750 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3751 		    error, so_locked);
3752 		break;
3753 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3754 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3755 		    error, so_locked);
3756 		break;
3757 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3758 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3759 		    error, so_locked);
3760 		break;
3761 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3762 		sctp_notify_shutdown_event(stcb);
3763 		break;
3764 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3765 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3766 		    (uint16_t)(uintptr_t)data,
3767 		    so_locked);
3768 		break;
3769 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3770 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3771 		    (uint16_t)(uintptr_t)data,
3772 		    so_locked);
3773 		break;
3774 	case SCTP_NOTIFY_NO_PEER_AUTH:
3775 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3776 		    (uint16_t)(uintptr_t)data,
3777 		    so_locked);
3778 		break;
3779 	case SCTP_NOTIFY_SENDER_DRY:
3780 		sctp_notify_sender_dry_event(stcb, so_locked);
3781 		break;
3782 	case SCTP_NOTIFY_REMOTE_ERROR:
3783 		sctp_notify_remote_error(stcb, error, data);
3784 		break;
3785 	default:
3786 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3787 		    __func__, notification, notification);
3788 		break;
3789 	}			/* end switch */
3790 }
3791 
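/*
 * Report every chunk still sitting on the sent, send and per-stream out
 * queues as failed and free the associated data. Used when the
 * association is being aborted.
 */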
3792 void
3793 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
3794 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3795     SCTP_UNUSED
3796 #endif
3797 )
3798 {
3799 	struct sctp_association *asoc;
3800 	struct sctp_stream_out *outs;
3801 	struct sctp_tmit_chunk *chk, *nchk;
3802 	struct sctp_stream_queue_pending *sp, *nsp;
3803 	int i;
3804 
3805 	if (stcb == NULL) {
3806 		return;
3807 	}
3808 	asoc = &stcb->asoc;
3809 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3810 		/* already being freed */
3811 		return;
3812 	}
3813 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3814 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3815 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
3816 		return;
3817 	}
3818 	/* now go through all the gunk, freeing chunks */
3819 	if (holds_lock == 0) {
3820 		SCTP_TCB_SEND_LOCK(stcb);
3821 	}
3822 	/* sent queue SHOULD be empty */
3823 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
3824 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3825 		asoc->sent_queue_cnt--;
3826 		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
3827 			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
3828 				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
3829 #ifdef INVARIANTS
3830 			} else {
3831 				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
3832 #endif
3833 			}
3834 		}
3835 		if (chk->data != NULL) {
3836 			sctp_free_bufspace(stcb, asoc, chk, 1);
3837 			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
3838 			    error, chk, so_locked);
3839 			if (chk->data) {
3840 				sctp_m_freem(chk->data);
3841 				chk->data = NULL;
3842 			}
3843 		}
3844 		sctp_free_a_chunk(stcb, chk, so_locked);
3845 		/* sa_ignore FREED_MEMORY */
3846 	}
3847 	/* pending send queue SHOULD be empty */
3848 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
3849 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3850 		asoc->send_queue_cnt--;
3851 		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
3852 			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
3853 #ifdef INVARIANTS
3854 		} else {
3855 			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
3856 #endif
3857 		}
3858 		if (chk->data != NULL) {
3859 			sctp_free_bufspace(stcb, asoc, chk, 1);
3860 			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
3861 			    error, chk, so_locked);
3862 			if (chk->data) {
3863 				sctp_m_freem(chk->data);
3864 				chk->data = NULL;
3865 			}
3866 		}
3867 		sctp_free_a_chunk(stcb, chk, so_locked);
3868 		/* sa_ignore FREED_MEMORY */
3869 	}
3870 	for (i = 0; i < asoc->streamoutcnt; i++) {
3871 		/* For each stream */
3872 		outs = &asoc->strmout[i];
3873 		/* clean up any sends there */
3874 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
3875 			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
3876 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3877 			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, holds_lock);
3878 			sctp_free_spbufspace(stcb, asoc, sp);
3879 			if (sp->data) {
3880 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3881 				    error, (void *)sp, so_locked);
3882 				if (sp->data) {
3883 					sctp_m_freem(sp->data);
3884 					sp->data = NULL;
3885 					sp->tail_mbuf = NULL;
3886 					sp->length = 0;
3887 				}
3888 			}
3889 			if (sp->net) {
3890 				sctp_free_remote_addr(sp->net);
3891 				sp->net = NULL;
3892 			}
3893 			/* Free the chunk */
3894 			sctp_free_a_strmoq(stcb, sp, so_locked);
3895 			/* sa_ignore FREED_MEMORY */
3896 		}
3897 	}
3898 
3899 	if (holds_lock == 0) {
3900 		SCTP_TCB_SEND_UNLOCK(stcb);
3901 	}
3902 }
3903 
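/*
 * Notify the ULP that the association was aborted: fail all outbound
 * data first, then report either a remote or a local abort depending on
 * from_peer.
 */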
3904 void
3905 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3906     struct sctp_abort_chunk *abort, int so_locked
3907 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3908     SCTP_UNUSED
3909 #endif
3910 )
3911 {
3912 	if (stcb == NULL) {
3913 		return;
3914 	}
3915 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3916 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3917 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3918 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3919 	}
3920 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3921 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3922 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3923 		return;
3924 	}
3925 	/* Tell them we lost the asoc */
3926 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3927 	if (from_peer) {
3928 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3929 	} else {
3930 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3931 	}
3932 }
3933 
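/*
 * Send an ABORT in response to an incoming packet, using the peer's
 * vtag if a TCB exists. If we do have a TCB, also notify the ULP and
 * free the association.
 */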
3934 void
3935 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3936     struct mbuf *m, int iphlen,
3937     struct sockaddr *src, struct sockaddr *dst,
3938     struct sctphdr *sh, struct mbuf *op_err,
3939     uint8_t mflowtype, uint32_t mflowid,
3940     uint32_t vrf_id, uint16_t port)
3941 {
3942 	uint32_t vtag;
3943 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3944 	struct socket *so;
3945 #endif
3946 
3947 	vtag = 0;
3948 	if (stcb != NULL) {
3949 		vtag = stcb->asoc.peer_vtag;
3950 		vrf_id = stcb->asoc.vrf_id;
3951 	}
3952 	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
3953 	    mflowtype, mflowid, inp->fibnum,
3954 	    vrf_id, port);
3955 	if (stcb != NULL) {
3956 		/* We have a TCB to abort, send notification too */
3957 		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
3958 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3959 		/* Ok, now lets free it */
3960 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3961 		so = SCTP_INP_SO(inp);
3962 		atomic_add_int(&stcb->asoc.refcnt, 1);
3963 		SCTP_TCB_UNLOCK(stcb);
3964 		SCTP_SOCKET_LOCK(so, 1);
3965 		SCTP_TCB_LOCK(stcb);
3966 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3967 #endif
3968 		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
3969 		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
3970 		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3971 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3972 		}
3973 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
3974 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
3975 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3976 		SCTP_SOCKET_UNLOCK(so, 1);
3977 #endif
3978 	}
3979 }
3980 #ifdef SCTP_ASOCLOG_OF_TSNS
3981 void
3982 sctp_print_out_track_log(struct sctp_tcb *stcb)
3983 {
3984 #ifdef NOSIY_PRINTS
3985 	int i;
3986 
3987 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
3988 	SCTP_PRINTF("IN bound TSN log-aaa\n");
3989 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
3990 		SCTP_PRINTF("None rcvd\n");
3991 		goto none_in;
3992 	}
3993 	if (stcb->asoc.tsn_in_wrapped) {
3994 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
3995 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
3996 			    stcb->asoc.in_tsnlog[i].tsn,
3997 			    stcb->asoc.in_tsnlog[i].strm,
3998 			    stcb->asoc.in_tsnlog[i].seq,
3999 			    stcb->asoc.in_tsnlog[i].flgs,
4000 			    stcb->asoc.in_tsnlog[i].sz);
4001 		}
4002 	}
4003 	if (stcb->asoc.tsn_in_at) {
4004 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4005 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4006 			    stcb->asoc.in_tsnlog[i].tsn,
4007 			    stcb->asoc.in_tsnlog[i].strm,
4008 			    stcb->asoc.in_tsnlog[i].seq,
4009 			    stcb->asoc.in_tsnlog[i].flgs,
4010 			    stcb->asoc.in_tsnlog[i].sz);
4011 		}
4012 	}
4013 none_in:
4014 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
4015 	if ((stcb->asoc.tsn_out_at == 0) &&
4016 	    (stcb->asoc.tsn_out_wrapped == 0)) {
4017 		SCTP_PRINTF("None sent\n");
4018 	}
4019 	if (stcb->asoc.tsn_out_wrapped) {
4020 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4021 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4022 			    stcb->asoc.out_tsnlog[i].tsn,
4023 			    stcb->asoc.out_tsnlog[i].strm,
4024 			    stcb->asoc.out_tsnlog[i].seq,
4025 			    stcb->asoc.out_tsnlog[i].flgs,
4026 			    stcb->asoc.out_tsnlog[i].sz);
4027 		}
4028 	}
4029 	if (stcb->asoc.tsn_out_at) {
4030 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4031 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4032 			    stcb->asoc.out_tsnlog[i].tsn,
4033 			    stcb->asoc.out_tsnlog[i].strm,
4034 			    stcb->asoc.out_tsnlog[i].seq,
4035 			    stcb->asoc.out_tsnlog[i].flgs,
4036 			    stcb->asoc.out_tsnlog[i].sz);
4037 		}
4038 	}
4039 #endif
4040 }
4041 #endif
4042 
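/*
 * Abort an existing association: send an ABORT chunk carrying op_err to
 * the peer, notify the ULP (unless the socket is already gone) and free
 * the association. Without a TCB, only the endpoint is cleaned up if its
 * socket is gone.
 */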
4043 void
4044 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4045     struct mbuf *op_err,
4046     int so_locked
4047 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4048     SCTP_UNUSED
4049 #endif
4050 )
4051 {
4052 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4053 	struct socket *so;
4054 #endif
4055 
4056 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4057 	so = SCTP_INP_SO(inp);
4058 #endif
4059 	if (stcb == NULL) {
4060 		/* Got to have a TCB */
4061 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4062 			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4063 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4064 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
4065 			}
4066 		}
4067 		return;
4068 	} else {
4069 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4070 	}
4071 	/* notify the peer */
4072 	sctp_send_abort_tcb(stcb, op_err, so_locked);
4073 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4074 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4075 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4076 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4077 	}
4078 	/* notify the ulp */
4079 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4080 		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4081 	}
4082 	/* now free the asoc */
4083 #ifdef SCTP_ASOCLOG_OF_TSNS
4084 	sctp_print_out_track_log(stcb);
4085 #endif
4086 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4087 	if (!so_locked) {
4088 		atomic_add_int(&stcb->asoc.refcnt, 1);
4089 		SCTP_TCB_UNLOCK(stcb);
4090 		SCTP_SOCKET_LOCK(so, 1);
4091 		SCTP_TCB_LOCK(stcb);
4092 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4093 	}
4094 #endif
4095 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4096 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4097 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4098 	if (!so_locked) {
4099 		SCTP_SOCKET_UNLOCK(so, 1);
4100 	}
4101 #endif
4102 }
4103 
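/*
 * Handle an "out of the blue" packet, i.e. one for which no association
 * exists. Depending on the chunk types found we either stay silent
 * (ABORT, SHUTDOWN COMPLETE, packet-dropped), answer a SHUTDOWN ACK with
 * a SHUTDOWN COMPLETE, or send an ABORT, subject to the blackhole sysctl.
 */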
4104 void
4105 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4106     struct sockaddr *src, struct sockaddr *dst,
4107     struct sctphdr *sh, struct sctp_inpcb *inp,
4108     struct mbuf *cause,
4109     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4110     uint32_t vrf_id, uint16_t port)
4111 {
4112 	struct sctp_chunkhdr *ch, chunk_buf;
4113 	unsigned int chk_length;
4114 	int contains_init_chunk;
4115 
4116 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4117 	/* Generate a TO address for future reference */
4118 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4119 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4120 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4121 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4122 		}
4123 	}
4124 	contains_init_chunk = 0;
4125 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4126 	    sizeof(*ch), (uint8_t *)&chunk_buf);
4127 	while (ch != NULL) {
4128 		chk_length = ntohs(ch->chunk_length);
4129 		if (chk_length < sizeof(*ch)) {
4130 			/* break to abort land */
4131 			break;
4132 		}
4133 		switch (ch->chunk_type) {
4134 		case SCTP_INIT:
4135 			contains_init_chunk = 1;
4136 			break;
4137 		case SCTP_PACKET_DROPPED:
4138 			/* we don't respond to pkt-dropped */
4139 			return;
4140 		case SCTP_ABORT_ASSOCIATION:
4141 			/* we don't respond with an ABORT to an ABORT */
4142 			return;
4143 		case SCTP_SHUTDOWN_COMPLETE:
4144 			/*
4145 			 * we ignore it since we are not waiting for it and
4146 			 * peer is gone
4147 			 */
4148 			return;
4149 		case SCTP_SHUTDOWN_ACK:
4150 			sctp_send_shutdown_complete2(src, dst, sh,
4151 			    mflowtype, mflowid, fibnum,
4152 			    vrf_id, port);
4153 			return;
4154 		default:
4155 			break;
4156 		}
4157 		offset += SCTP_SIZE32(chk_length);
4158 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4159 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4160 	}
4161 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4162 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4163 	    (contains_init_chunk == 0))) {
4164 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4165 		    mflowtype, mflowid, fibnum,
4166 		    vrf_id, port);
4167 	}
4168 }
4169 
4170 /*
4171  * Check the inbound datagram to make sure there is not an abort inside it;
4172  * if there is, return 1, else return 0.
4173  */
4174 int
4175 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
4176 {
4177 	struct sctp_chunkhdr *ch;
4178 	struct sctp_init_chunk *init_chk, chunk_buf;
4179 	int offset;
4180 	unsigned int chk_length;
4181 
4182 	offset = iphlen + sizeof(struct sctphdr);
4183 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4184 	    (uint8_t *)&chunk_buf);
4185 	while (ch != NULL) {
4186 		chk_length = ntohs(ch->chunk_length);
4187 		if (chk_length < sizeof(*ch)) {
4188 			/* packet is probably corrupt */
4189 			break;
4190 		}
4191 		/* we seem to be ok, is it an abort? */
4192 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4193 			/* yep, tell them */
4194 			return (1);
4195 		}
4196 		if (ch->chunk_type == SCTP_INITIATION) {
4197 			/* need to update the Vtag */
4198 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4199 			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
4200 			if (init_chk != NULL) {
4201 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4202 			}
4203 		}
4204 		/* Nope, move to the next chunk */
4205 		offset += SCTP_SIZE32(chk_length);
4206 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4207 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4208 	}
4209 	return (0);
4210 }
4211 
4212 /*
4213  * currently (2/02), ifa_addr embeds scope_ids and doesn't have sin6_scope_id
4214  * set (i.e. it's 0), so create this function to compare link-local scopes
4215  */
4216 #ifdef INET6
4217 uint32_t
4218 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4219 {
4220 	struct sockaddr_in6 a, b;
4221 
4222 	/* save copies */
4223 	a = *addr1;
4224 	b = *addr2;
4225 
4226 	if (a.sin6_scope_id == 0)
4227 		if (sa6_recoverscope(&a)) {
4228 			/* can't get scope, so can't match */
4229 			return (0);
4230 		}
4231 	if (b.sin6_scope_id == 0)
4232 		if (sa6_recoverscope(&b)) {
4233 			/* can't get scope, so can't match */
4234 			return (0);
4235 		}
4236 	if (a.sin6_scope_id != b.sin6_scope_id)
4237 		return (0);
4238 
4239 	return (1);
4240 }
4241 
4242 /*
4243  * returns a sockaddr_in6 with embedded scope recovered and removed
4244  */
4245 struct sockaddr_in6 *
4246 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4247 {
4248 	/* check and strip embedded scope junk */
4249 	if (addr->sin6_family == AF_INET6) {
4250 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4251 			if (addr->sin6_scope_id == 0) {
4252 				*store = *addr;
4253 				if (!sa6_recoverscope(store)) {
4254 					/* use the recovered scope */
4255 					addr = store;
4256 				}
4257 			} else {
4258 				/* else, return the original "to" addr */
4259 				in6_clearscope(&addr->sin6_addr);
4260 			}
4261 		}
4262 	}
4263 	return (addr);
4264 }
4265 #endif
4266 
4267 /*
4268  * Are the two addresses the same? Currently a "scopeless" check. Returns 1
4269  * if same, 0 if not.
4270  */
4271 int
4272 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4273 {
4274 
4275 	/* must be valid */
4276 	if (sa1 == NULL || sa2 == NULL)
4277 		return (0);
4278 
4279 	/* must be the same family */
4280 	if (sa1->sa_family != sa2->sa_family)
4281 		return (0);
4282 
4283 	switch (sa1->sa_family) {
4284 #ifdef INET6
4285 	case AF_INET6:
4286 		{
4287 			/* IPv6 addresses */
4288 			struct sockaddr_in6 *sin6_1, *sin6_2;
4289 
4290 			sin6_1 = (struct sockaddr_in6 *)sa1;
4291 			sin6_2 = (struct sockaddr_in6 *)sa2;
4292 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4293 			    sin6_2));
4294 		}
4295 #endif
4296 #ifdef INET
4297 	case AF_INET:
4298 		{
4299 			/* IPv4 addresses */
4300 			struct sockaddr_in *sin_1, *sin_2;
4301 
4302 			sin_1 = (struct sockaddr_in *)sa1;
4303 			sin_2 = (struct sockaddr_in *)sa2;
4304 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4305 		}
4306 #endif
4307 	default:
4308 		/* we don't do these... */
4309 		return (0);
4310 	}
4311 }
4312 
4313 void
4314 sctp_print_address(struct sockaddr *sa)
4315 {
4316 #ifdef INET6
4317 	char ip6buf[INET6_ADDRSTRLEN];
4318 #endif
4319 
4320 	switch (sa->sa_family) {
4321 #ifdef INET6
4322 	case AF_INET6:
4323 		{
4324 			struct sockaddr_in6 *sin6;
4325 
4326 			sin6 = (struct sockaddr_in6 *)sa;
4327 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4328 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4329 			    ntohs(sin6->sin6_port),
4330 			    sin6->sin6_scope_id);
4331 			break;
4332 		}
4333 #endif
4334 #ifdef INET
4335 	case AF_INET:
4336 		{
4337 			struct sockaddr_in *sin;
4338 			unsigned char *p;
4339 
4340 			sin = (struct sockaddr_in *)sa;
4341 			p = (unsigned char *)&sin->sin_addr;
4342 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4343 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4344 			break;
4345 		}
4346 #endif
4347 	default:
4348 		SCTP_PRINTF("?\n");
4349 		break;
4350 	}
4351 }
4352 
4353 void
4354 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4355     struct sctp_inpcb *new_inp,
4356     struct sctp_tcb *stcb,
4357     int waitflags)
4358 {
4359 	/*
4360 	 * go through our old INP and pull off any control structures that
4361 	 * belong to stcb and move them to the new inp.
4362 	 */
4363 	struct socket *old_so, *new_so;
4364 	struct sctp_queued_to_read *control, *nctl;
4365 	struct sctp_readhead tmp_queue;
4366 	struct mbuf *m;
4367 	int error = 0;
4368 
4369 	old_so = old_inp->sctp_socket;
4370 	new_so = new_inp->sctp_socket;
4371 	TAILQ_INIT(&tmp_queue);
4372 	error = sblock(&old_so->so_rcv, waitflags);
4373 	if (error) {
4374 		/*
4375 		 * Gak, can't get sblock, we have a problem. data will be
4376 		 * left stranded.. and we don't dare look at it since the
4377 		 * other thread may be reading something. Oh well, it's a
4378 		 * screwed-up app that does a peeloff OR an accept while
4379 		 * reading from the main socket... actually it's only the
4380 		 * peeloff() case, since I think read will fail on a
4381 		 * listening socket..
4382 		 */
4383 		return;
4384 	}
4385 	/* lock the socket buffers */
4386 	SCTP_INP_READ_LOCK(old_inp);
4387 	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4388 		/* Pull off all for our target stcb */
4389 		if (control->stcb == stcb) {
4390 			/* remove it we want it */
4391 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4392 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4393 			m = control->data;
4394 			while (m) {
4395 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4396 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4397 				}
4398 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4399 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4400 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4401 				}
4402 				m = SCTP_BUF_NEXT(m);
4403 			}
4404 		}
4405 	}
4406 	SCTP_INP_READ_UNLOCK(old_inp);
4407 	/* Remove the sb-lock on the old socket */
4408 
4409 	sbunlock(&old_so->so_rcv);
4410 	/* Now we move them over to the new socket buffer */
4411 	SCTP_INP_READ_LOCK(new_inp);
4412 	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4413 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4414 		m = control->data;
4415 		while (m) {
4416 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4417 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4418 			}
4419 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4420 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4421 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4422 			}
4423 			m = SCTP_BUF_NEXT(m);
4424 		}
4425 	}
4426 	SCTP_INP_READ_UNLOCK(new_inp);
4427 }
4428 
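/*
 * Wake up any reader sleeping on the socket's receive buffer, taking the
 * socket lock first on platforms that require it.
 */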
4429 void
4430 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
4431     struct sctp_tcb *stcb,
4432     int so_locked
4433 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4434     SCTP_UNUSED
4435 #endif
4436 )
4437 {
4438 	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
4439 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4440 		struct socket *so;
4441 
4442 		so = SCTP_INP_SO(inp);
4443 		if (!so_locked) {
4444 			if (stcb) {
4445 				atomic_add_int(&stcb->asoc.refcnt, 1);
4446 				SCTP_TCB_UNLOCK(stcb);
4447 			}
4448 			SCTP_SOCKET_LOCK(so, 1);
4449 			if (stcb) {
4450 				SCTP_TCB_LOCK(stcb);
4451 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4452 			}
4453 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4454 				SCTP_SOCKET_UNLOCK(so, 1);
4455 				return;
4456 			}
4457 		}
4458 #endif
4459 		sctp_sorwakeup(inp, inp->sctp_socket);
4460 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4461 		if (!so_locked) {
4462 			SCTP_SOCKET_UNLOCK(so, 1);
4463 		}
4464 #endif
4465 	}
4466 }
4467 
4468 void
4469 sctp_add_to_readq(struct sctp_inpcb *inp,
4470     struct sctp_tcb *stcb,
4471     struct sctp_queued_to_read *control,
4472     struct sockbuf *sb,
4473     int end,
4474     int inp_read_lock_held,
4475     int so_locked
4476 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4477     SCTP_UNUSED
4478 #endif
4479 )
4480 {
4481 	/*
4482 	 * Here we must place the control on the end of the socket read
4483 	 * queue AND increment sb_cc so that select will work properly on
4484 	 * read.
4485 	 */
4486 	struct mbuf *m, *prev = NULL;
4487 
4488 	if (inp == NULL) {
4489 		/* Gak, TSNH!! */
4490 #ifdef INVARIANTS
4491 		panic("Gak, inp NULL on add_to_readq");
4492 #endif
4493 		return;
4494 	}
4495 	if (inp_read_lock_held == 0)
4496 		SCTP_INP_READ_LOCK(inp);
4497 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4498 		sctp_free_remote_addr(control->whoFrom);
4499 		if (control->data) {
4500 			sctp_m_freem(control->data);
4501 			control->data = NULL;
4502 		}
4503 		sctp_free_a_readq(stcb, control);
4504 		if (inp_read_lock_held == 0)
4505 			SCTP_INP_READ_UNLOCK(inp);
4506 		return;
4507 	}
4508 	if (!(control->spec_flags & M_NOTIFICATION)) {
4509 		atomic_add_int(&inp->total_recvs, 1);
4510 		if (!control->do_not_ref_stcb) {
4511 			atomic_add_int(&stcb->total_recvs, 1);
4512 		}
4513 	}
4514 	m = control->data;
4515 	control->held_length = 0;
4516 	control->length = 0;
4517 	while (m) {
4518 		if (SCTP_BUF_LEN(m) == 0) {
4519 			/* Skip mbufs with NO length */
4520 			if (prev == NULL) {
4521 				/* First one */
4522 				control->data = sctp_m_free(m);
4523 				m = control->data;
4524 			} else {
4525 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4526 				m = SCTP_BUF_NEXT(prev);
4527 			}
4528 			if (m == NULL) {
4529 				control->tail_mbuf = prev;
4530 			}
4531 			continue;
4532 		}
4533 		prev = m;
4534 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4535 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4536 		}
4537 		sctp_sballoc(stcb, sb, m);
4538 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4539 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4540 		}
4541 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4542 		m = SCTP_BUF_NEXT(m);
4543 	}
4544 	if (prev != NULL) {
4545 		control->tail_mbuf = prev;
4546 	} else {
4547 		/* Everything got collapsed out?? */
4548 		sctp_free_remote_addr(control->whoFrom);
4549 		sctp_free_a_readq(stcb, control);
4550 		if (inp_read_lock_held == 0)
4551 			SCTP_INP_READ_UNLOCK(inp);
4552 		return;
4553 	}
4554 	if (end) {
4555 		control->end_added = 1;
4556 	}
4557 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4558 	control->on_read_q = 1;
4559 	if (inp_read_lock_held == 0)
4560 		SCTP_INP_READ_UNLOCK(inp);
4561 	if (inp && inp->sctp_socket) {
4562 		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
4563 	}
4564 }
4565 
4566 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4567  *************ALTERNATE ROUTING CODE
4568  */
4569 
4570 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4571  *************ALTERNATE ROUTING CODE
4572  */
4573 
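/*
 * Build an mbuf holding a generic error cause with the given code and
 * the string info as payload. Returns NULL if code is 0, info is NULL or
 * too long, or no mbuf is available.
 */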
4574 struct mbuf *
4575 sctp_generate_cause(uint16_t code, char *info)
4576 {
4577 	struct mbuf *m;
4578 	struct sctp_gen_error_cause *cause;
4579 	size_t info_len;
4580 	uint16_t len;
4581 
4582 	if ((code == 0) || (info == NULL)) {
4583 		return (NULL);
4584 	}
4585 	info_len = strlen(info);
4586 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4587 		return (NULL);
4588 	}
4589 	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
4590 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4591 	if (m != NULL) {
4592 		SCTP_BUF_LEN(m) = len;
4593 		cause = mtod(m, struct sctp_gen_error_cause *);
4594 		cause->code = htons(code);
4595 		cause->length = htons(len);
4596 		memcpy(cause->info, info, info_len);
4597 	}
4598 	return (m);
4599 }
4600 
4601 struct mbuf *
4602 sctp_generate_no_user_data_cause(uint32_t tsn)
4603 {
4604 	struct mbuf *m;
4605 	struct sctp_error_no_user_data *no_user_data_cause;
4606 	uint16_t len;
4607 
4608 	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
4609 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4610 	if (m != NULL) {
4611 		SCTP_BUF_LEN(m) = len;
4612 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4613 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4614 		no_user_data_cause->cause.length = htons(len);
4615 		no_user_data_cause->tsn = htonl(tsn);
4616 	}
4617 	return (m);
4618 }
4619 
4620 #ifdef SCTP_MBCNT_LOGGING
4621 void
4622 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4623     struct sctp_tmit_chunk *tp1, int chk_cnt)
4624 {
4625 	if (tp1->data == NULL) {
4626 		return;
4627 	}
4628 	asoc->chunks_on_out_queue -= chk_cnt;
4629 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4630 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4631 		    asoc->total_output_queue_size,
4632 		    tp1->book_size,
4633 		    0,
4634 		    tp1->mbcnt);
4635 	}
4636 	if (asoc->total_output_queue_size >= tp1->book_size) {
4637 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4638 	} else {
4639 		asoc->total_output_queue_size = 0;
4640 	}
4641 
4642 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4643 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4644 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4645 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4646 		} else {
4647 			stcb->sctp_socket->so_snd.sb_cc = 0;
4648 
4649 		}
4650 	}
4651 }
4652 
4653 #endif
4654 
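/*
 * Abandon a PR-SCTP message: mark all of its fragments on the sent and
 * send queues to be skipped by a FORWARD-TSN, update the abandoned
 * counters, notify the ULP and free the data. Returns the amount of
 * queued data released.
 */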
4655 int
4656 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4657     uint8_t sent, int so_locked
4658 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4659     SCTP_UNUSED
4660 #endif
4661 )
4662 {
4663 	struct sctp_stream_out *strq;
4664 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4665 	struct sctp_stream_queue_pending *sp;
4666 	uint32_t mid;
4667 	uint16_t sid;
4668 	uint8_t foundeom = 0;
4669 	int ret_sz = 0;
4670 	int notdone;
4671 	int do_wakeup_routine = 0;
4672 
4673 	sid = tp1->rec.data.sid;
4674 	mid = tp1->rec.data.mid;
4675 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4676 		stcb->asoc.abandoned_sent[0]++;
4677 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4678 		stcb->asoc.strmout[sid].abandoned_sent[0]++;
4679 #if defined(SCTP_DETAILED_STR_STATS)
4680 		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4681 #endif
4682 	} else {
4683 		stcb->asoc.abandoned_unsent[0]++;
4684 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4685 		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
4686 #if defined(SCTP_DETAILED_STR_STATS)
4687 		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4688 #endif
4689 	}
4690 	do {
4691 		ret_sz += tp1->book_size;
4692 		if (tp1->data != NULL) {
4693 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4694 				sctp_flight_size_decrease(tp1);
4695 				sctp_total_flight_decrease(stcb, tp1);
4696 			}
4697 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4698 			stcb->asoc.peers_rwnd += tp1->send_size;
4699 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4700 			if (sent) {
4701 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4702 			} else {
4703 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4704 			}
4705 			if (tp1->data) {
4706 				sctp_m_freem(tp1->data);
4707 				tp1->data = NULL;
4708 			}
4709 			do_wakeup_routine = 1;
4710 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4711 				stcb->asoc.sent_queue_cnt_removeable--;
4712 			}
4713 		}
4714 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4715 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4716 		    SCTP_DATA_NOT_FRAG) {
4717 			/* not fragmented, we are done */
4718 			notdone = 0;
4719 			foundeom = 1;
4720 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4721 			/* end of frag, we are done */
4722 			notdone = 0;
4723 			foundeom = 1;
4724 		} else {
4725 			/*
4726 			 * It's a begin or middle piece; we must mark all of
4727 			 * it
4728 			 */
4729 			notdone = 1;
4730 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4731 		}
4732 	} while (tp1 && notdone);
4733 	if (foundeom == 0) {
4734 		/*
4735 		 * The multi-part message was scattered across the send and
4736 		 * sent queue.
4737 		 */
4738 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4739 			if ((tp1->rec.data.sid != sid) ||
4740 			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
4741 				break;
4742 			}
4743 			/*
4744 			 * Save to chk in case we have some on the stream out
4745 			 * queue. If so, and we have an un-transmitted one, we
4746 			 * don't have to fudge the TSN.
4747 			 */
4748 			chk = tp1;
4749 			ret_sz += tp1->book_size;
4750 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4751 			if (sent) {
4752 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4753 			} else {
4754 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4755 			}
4756 			if (tp1->data) {
4757 				sctp_m_freem(tp1->data);
4758 				tp1->data = NULL;
4759 			}
4760 			/* No flight involved here; book the size to 0 */
4761 			tp1->book_size = 0;
4762 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4763 				foundeom = 1;
4764 			}
4765 			do_wakeup_routine = 1;
4766 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4767 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4768 			/*
4769 			 * Move it on to the sent queue so we can wait for it to be
4770 			 * passed by.
4771 			 */
4772 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4773 			    sctp_next);
4774 			stcb->asoc.send_queue_cnt--;
4775 			stcb->asoc.sent_queue_cnt++;
4776 		}
4777 	}
4778 	if (foundeom == 0) {
4779 		/*
4780 		 * Still no eom found. That means there is stuff left on the
4781 		 * stream out queue.. yuck.
4782 		 */
4783 		SCTP_TCB_SEND_LOCK(stcb);
4784 		strq = &stcb->asoc.strmout[sid];
4785 		sp = TAILQ_FIRST(&strq->outqueue);
4786 		if (sp != NULL) {
4787 			sp->discard_rest = 1;
4788 			/*
4789 			 * We may need to put a chunk on the queue that
4790 			 * holds the TSN that would have been sent with the
4791 			 * LAST bit.
4792 			 */
4793 			if (chk == NULL) {
4794 				/* Yep, we have to */
4795 				sctp_alloc_a_chunk(stcb, chk);
4796 				if (chk == NULL) {
4797 					/*
4798 					 * we are hosed. All we can do is
4799 					 * nothing.. which will cause an
4800 					 * abort if the peer is paying
4801 					 * attention.
4802 					 */
4803 					goto oh_well;
4804 				}
4805 				memset(chk, 0, sizeof(*chk));
4806 				chk->rec.data.rcv_flags = 0;
4807 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4808 				chk->asoc = &stcb->asoc;
4809 				if (stcb->asoc.idata_supported == 0) {
4810 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4811 						chk->rec.data.mid = 0;
4812 					} else {
4813 						chk->rec.data.mid = strq->next_mid_ordered;
4814 					}
4815 				} else {
4816 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4817 						chk->rec.data.mid = strq->next_mid_unordered;
4818 					} else {
4819 						chk->rec.data.mid = strq->next_mid_ordered;
4820 					}
4821 				}
4822 				chk->rec.data.sid = sp->sid;
4823 				chk->rec.data.ppid = sp->ppid;
4824 				chk->rec.data.context = sp->context;
4825 				chk->flags = sp->act_flags;
4826 				chk->whoTo = NULL;
4827 				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4828 				strq->chunks_on_queues++;
4829 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4830 				stcb->asoc.sent_queue_cnt++;
4831 				stcb->asoc.pr_sctp_cnt++;
4832 			}
4833 			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4834 			if (sp->sinfo_flags & SCTP_UNORDERED) {
4835 				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
4836 			}
4837 			if (stcb->asoc.idata_supported == 0) {
4838 				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
4839 					strq->next_mid_ordered++;
4840 				}
4841 			} else {
4842 				if (sp->sinfo_flags & SCTP_UNORDERED) {
4843 					strq->next_mid_unordered++;
4844 				} else {
4845 					strq->next_mid_ordered++;
4846 				}
4847 			}
4848 	oh_well:
4849 			if (sp->data) {
4850 				/*
4851 				 * Pull any data to free up the SB and allow the
4852 				 * sender to "add more" while we throw this
4853 				 * away :-)
4854 				 */
4855 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4856 				ret_sz += sp->length;
4857 				do_wakeup_routine = 1;
4858 				sp->some_taken = 1;
4859 				sctp_m_freem(sp->data);
4860 				sp->data = NULL;
4861 				sp->tail_mbuf = NULL;
4862 				sp->length = 0;
4863 			}
4864 		}
4865 		SCTP_TCB_SEND_UNLOCK(stcb);
4866 	}
4867 	if (do_wakeup_routine) {
4868 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4869 		struct socket *so;
4870 
4871 		so = SCTP_INP_SO(stcb->sctp_ep);
4872 		if (!so_locked) {
4873 			atomic_add_int(&stcb->asoc.refcnt, 1);
4874 			SCTP_TCB_UNLOCK(stcb);
4875 			SCTP_SOCKET_LOCK(so, 1);
4876 			SCTP_TCB_LOCK(stcb);
4877 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4878 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4879 				/* assoc was freed while we were unlocked */
4880 				SCTP_SOCKET_UNLOCK(so, 1);
4881 				return (ret_sz);
4882 			}
4883 		}
4884 #endif
4885 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4886 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4887 		if (!so_locked) {
4888 			SCTP_SOCKET_UNLOCK(so, 1);
4889 		}
4890 #endif
4891 	}
4892 	return (ret_sz);
4893 }
4894 
4895 /*
4896  * Checks to see if the given address, addr, is one that is currently known by
4897  * the kernel. Note: can't distinguish the same address on multiple interfaces
4898  * and doesn't handle multiple addresses with different zone/scope ids. Note:
4899  * ifa_ifwithaddr() compares the entire sockaddr struct.
4900  */
4901 struct sctp_ifa *
4902 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4903     int holds_lock)
4904 {
4905 	struct sctp_laddr *laddr;
4906 
4907 	if (holds_lock == 0) {
4908 		SCTP_INP_RLOCK(inp);
4909 	}
4910 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4911 		if (laddr->ifa == NULL)
4912 			continue;
4913 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4914 			continue;
4915 #ifdef INET
4916 		if (addr->sa_family == AF_INET) {
4917 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4918 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4919 				/* found him. */
4920 				if (holds_lock == 0) {
4921 					SCTP_INP_RUNLOCK(inp);
4922 				}
4923 				return (laddr->ifa);
4924 				break;
4925 			}
4926 		}
4927 #endif
4928 #ifdef INET6
4929 		if (addr->sa_family == AF_INET6) {
4930 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4931 			    &laddr->ifa->address.sin6)) {
4932 				/* found him. */
4933 				if (holds_lock == 0) {
4934 					SCTP_INP_RUNLOCK(inp);
4935 				}
4936 				return (laddr->ifa);
4937 				break;
4938 			}
4939 		}
4940 #endif
4941 	}
4942 	if (holds_lock == 0) {
4943 		SCTP_INP_RUNLOCK(inp);
4944 	}
4945 	return (NULL);
4946 }
4947 
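/*
 * Compute a simple hash over an IPv4 or IPv6 address, used to index the
 * per-VRF address hash table.
 */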
4948 uint32_t
4949 sctp_get_ifa_hash_val(struct sockaddr *addr)
4950 {
4951 	switch (addr->sa_family) {
4952 #ifdef INET
4953 	case AF_INET:
4954 		{
4955 			struct sockaddr_in *sin;
4956 
4957 			sin = (struct sockaddr_in *)addr;
4958 			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4959 		}
4960 #endif
4961 #ifdef INET6
4962 	case AF_INET6:
4963 		{
4964 			struct sockaddr_in6 *sin6;
4965 			uint32_t hash_of_addr;
4966 
4967 			sin6 = (struct sockaddr_in6 *)addr;
4968 			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4969 			    sin6->sin6_addr.s6_addr32[1] +
4970 			    sin6->sin6_addr.s6_addr32[2] +
4971 			    sin6->sin6_addr.s6_addr32[3]);
4972 			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4973 			return (hash_of_addr);
4974 		}
4975 #endif
4976 	default:
4977 		break;
4978 	}
4979 	return (0);
4980 }
4981 
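/*
 * Look up addr in the given VRF's address hash table and return the
 * matching sctp_ifa, or NULL if the VRF or the address is unknown.
 */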
4982 struct sctp_ifa *
4983 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4984 {
4985 	struct sctp_ifa *sctp_ifap;
4986 	struct sctp_vrf *vrf;
4987 	struct sctp_ifalist *hash_head;
4988 	uint32_t hash_of_addr;
4989 
4990 	if (holds_lock == 0)
4991 		SCTP_IPI_ADDR_RLOCK();
4992 
4993 	vrf = sctp_find_vrf(vrf_id);
4994 	if (vrf == NULL) {
4995 		if (holds_lock == 0)
4996 			SCTP_IPI_ADDR_RUNLOCK();
4997 		return (NULL);
4998 	}
4999 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5000 
5001 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5002 	if (hash_head == NULL) {
5003 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5004 		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5005 		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5006 		sctp_print_address(addr);
5007 		SCTP_PRINTF("No such bucket for address\n");
5008 		if (holds_lock == 0)
5009 			SCTP_IPI_ADDR_RUNLOCK();
5010 
5011 		return (NULL);
5012 	}
5013 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5014 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5015 			continue;
5016 #ifdef INET
5017 		if (addr->sa_family == AF_INET) {
5018 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5019 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5020 				/* found him. */
5021 				if (holds_lock == 0)
5022 					SCTP_IPI_ADDR_RUNLOCK();
5023 				return (sctp_ifap);
5024 				break;
5025 			}
5026 		}
5027 #endif
5028 #ifdef INET6
5029 		if (addr->sa_family == AF_INET6) {
5030 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5031 			    &sctp_ifap->address.sin6)) {
5032 				/* found him. */
5033 				if (holds_lock == 0)
5034 					SCTP_IPI_ADDR_RUNLOCK();
5035 				return (sctp_ifap);
5036 				break;
5037 			}
5038 		}
5039 #endif
5040 	}
5041 	if (holds_lock == 0)
5042 		SCTP_IPI_ADDR_RUNLOCK();
5043 	return (NULL);
5044 }
5045 
5046 static void
5047 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
5048     uint32_t rwnd_req)
5049 {
5050 	/* User pulled some data, do we need a rwnd update? */
5051 	int r_unlocked = 0;
5052 	uint32_t dif, rwnd;
5053 	struct socket *so = NULL;
5054 
5055 	if (stcb == NULL)
5056 		return;
5057 
5058 	atomic_add_int(&stcb->asoc.refcnt, 1);
5059 
5060 	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
5061 	    SCTP_STATE_SHUTDOWN_RECEIVED |
5062 	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
5063 		/* Pre-check If we are freeing no update */
5064 		/* Pre-check: if we are freeing, no update */
5065 	}
5066 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5067 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5068 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5069 		goto out;
5070 	}
5071 	so = stcb->sctp_socket;
5072 	if (so == NULL) {
5073 		goto out;
5074 	}
5075 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5076 	/* Have you freed enough to look? */
5077 	*freed_so_far = 0;
5078 	/* Yep, it's worth a look and the lock overhead */
5079 
5080 	/* Figure out what the rwnd would be */
5081 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5082 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5083 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5084 	} else {
5085 		dif = 0;
5086 	}
5087 	if (dif >= rwnd_req) {
5088 		if (hold_rlock) {
5089 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5090 			r_unlocked = 1;
5091 		}
5092 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5093 			/*
5094 			 * One last check before we possibly let the guy in.
5095 			 * There is a race where the guy has not yet reached
5096 			 * the gate; in that case, bail out.
5097 			 */
5098 			goto out;
5099 		}
5100 		SCTP_TCB_LOCK(stcb);
5101 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5102 			/* No reports here */
5103 			SCTP_TCB_UNLOCK(stcb);
5104 			goto out;
5105 		}
5106 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5107 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5108 
5109 		sctp_chunk_output(stcb->sctp_ep, stcb,
5110 		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5111 		/* make sure no timer is running */
5112 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
5113 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5114 		SCTP_TCB_UNLOCK(stcb);
5115 	} else {
5116 		/* Update how much we have pending */
5117 		stcb->freed_by_sorcv_sincelast = dif;
5118 	}
5119 out:
5120 	if (so && r_unlocked && hold_rlock) {
5121 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5122 	}
5123 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5124 no_lock:
5125 	atomic_add_int(&stcb->asoc.refcnt, -1);
5126 	return;
5127 }
5128 
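/*
 * Receive-side workhorse: pull data and notifications off the endpoint's
 * read queue into uio (or into an mbuf chain when mp is non-NULL),
 * honoring the MSG flags described below and triggering window updates
 * as the application frees receive buffer space.
 */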
5129 int
5130 sctp_sorecvmsg(struct socket *so,
5131     struct uio *uio,
5132     struct mbuf **mp,
5133     struct sockaddr *from,
5134     int fromlen,
5135     int *msg_flags,
5136     struct sctp_sndrcvinfo *sinfo,
5137     int filling_sinfo)
5138 {
5139 	/*
5140 	 * MSG flags we will look at: MSG_DONTWAIT - non-blocking IO;
5141 	 * MSG_PEEK - look, don't touch :-D (only valid with OUT mbuf copy,
5142 	 * i.e. mp=NULL, thus uio is the copy method to userland); MSG_WAITALL - ??
5143 	 * On the way out we may send out any combination of:
5144 	 * MSG_NOTIFICATION MSG_EOR
5145 	 *
5146 	 */
5147 	struct sctp_inpcb *inp = NULL;
5148 	int my_len = 0;
5149 	int cp_len = 0, error = 0;
5150 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5151 	struct mbuf *m = NULL;
5152 	struct sctp_tcb *stcb = NULL;
5153 	int wakeup_read_socket = 0;
5154 	int freecnt_applied = 0;
5155 	int out_flags = 0, in_flags = 0;
5156 	int block_allowed = 1;
5157 	uint32_t freed_so_far = 0;
5158 	uint32_t copied_so_far = 0;
5159 	int in_eeor_mode = 0;
5160 	int no_rcv_needed = 0;
5161 	uint32_t rwnd_req = 0;
5162 	int hold_sblock = 0;
5163 	int hold_rlock = 0;
5164 	ssize_t slen = 0;
5165 	uint32_t held_length = 0;
5166 	int sockbuf_lock = 0;
5167 
5168 	if (uio == NULL) {
5169 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5170 		return (EINVAL);
5171 	}
5172 	if (msg_flags) {
5173 		in_flags = *msg_flags;
5174 		if (in_flags & MSG_PEEK)
5175 			SCTP_STAT_INCR(sctps_read_peeks);
5176 	} else {
5177 		in_flags = 0;
5178 	}
5179 	slen = uio->uio_resid;
5180 
5181 	/* Pull in and set up our int flags */
5182 	if (in_flags & MSG_OOB) {
5183 		/* Out of band's NOT supported */
5184 		return (EOPNOTSUPP);
5185 	}
5186 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5187 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5188 		return (EINVAL);
5189 	}
5190 	if ((in_flags & (MSG_DONTWAIT
5191 	    | MSG_NBIO
5192 	    )) ||
5193 	    SCTP_SO_IS_NBIO(so)) {
5194 		block_allowed = 0;
5195 	}
5196 	/* setup the endpoint */
5197 	inp = (struct sctp_inpcb *)so->so_pcb;
5198 	if (inp == NULL) {
5199 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5200 		return (EFAULT);
5201 	}
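	/*
	 * rwnd_req is how much data must be freed from the receive buffer
	 * before it is worth telling the peer that our window has opened
	 * back up (see the sctp_user_rcvd() calls below): a fraction of the
	 * socket buffer limit, but never less than SCTP_MIN_RWND.
	 */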
5202 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5203 	/* Must be at least a MTU's worth */
5204 	if (rwnd_req < SCTP_MIN_RWND)
5205 		rwnd_req = SCTP_MIN_RWND;
5206 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5207 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5208 		sctp_misc_ints(SCTP_SORECV_ENTER,
5209 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5210 	}
5211 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5212 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5213 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5214 	}
5215 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5216 	if (error) {
5217 		goto release_unlocked;
5218 	}
5219 	sockbuf_lock = 1;
5220 restart:
5221 
5222 
5223 restart_nosblocks:
5224 	if (hold_sblock == 0) {
5225 		SOCKBUF_LOCK(&so->so_rcv);
5226 		hold_sblock = 1;
5227 	}
5228 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5229 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5230 		goto out;
5231 	}
5232 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5233 		if (so->so_error) {
5234 			error = so->so_error;
5235 			if ((in_flags & MSG_PEEK) == 0)
5236 				so->so_error = 0;
5237 			goto out;
5238 		} else {
5239 			if (so->so_rcv.sb_cc == 0) {
5240 				/* indicate EOF */
5241 				error = 0;
5242 				goto out;
5243 			}
5244 		}
5245 	}
5246 	if (so->so_rcv.sb_cc <= held_length) {
5247 		if (so->so_error) {
5248 			error = so->so_error;
5249 			if ((in_flags & MSG_PEEK) == 0) {
5250 				so->so_error = 0;
5251 			}
5252 			goto out;
5253 		}
5254 		if ((so->so_rcv.sb_cc == 0) &&
5255 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5256 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5257 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5258 				/*
5259 				 * For the active open side, clear flags for
5260 				 * re-use; the passive open side is blocked by
5261 				 * connect.
5262 				 */
5263 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5264 					/*
5265 					 * You were aborted, passive side
5266 					 * always hits here
5267 					 */
5268 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5269 					error = ECONNRESET;
5270 				}
5271 				so->so_state &= ~(SS_ISCONNECTING |
5272 				    SS_ISDISCONNECTING |
5273 				    SS_ISCONFIRMING |
5274 				    SS_ISCONNECTED);
5275 				if (error == 0) {
5276 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5277 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5278 						error = ENOTCONN;
5279 					}
5280 				}
5281 				goto out;
5282 			}
5283 		}
5284 		if (block_allowed) {
5285 			error = sbwait(&so->so_rcv);
5286 			if (error) {
5287 				goto out;
5288 			}
5289 			held_length = 0;
5290 			goto restart_nosblocks;
5291 		} else {
5292 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5293 			error = EWOULDBLOCK;
5294 			goto out;
5295 		}
5296 	}
5297 	if (hold_sblock == 1) {
5298 		SOCKBUF_UNLOCK(&so->so_rcv);
5299 		hold_sblock = 0;
5300 	}
5301 	/* we possibly have data we can read */
5302 	/* sa_ignore FREED_MEMORY */
5303 	control = TAILQ_FIRST(&inp->read_queue);
5304 	if (control == NULL) {
5305 		/*
5306 		 * This could be happening since the appender did the
5307 		 * increment but has not yet done the tailq insert onto the
5308 		 * read_queue.
5309 		 */
5310 		if (hold_rlock == 0) {
5311 			SCTP_INP_READ_LOCK(inp);
5312 		}
5313 		control = TAILQ_FIRST(&inp->read_queue);
5314 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5315 #ifdef INVARIANTS
5316 			panic("Huh, its non zero and nothing on control?");
5317 #endif
5318 			so->so_rcv.sb_cc = 0;
5319 		}
5320 		SCTP_INP_READ_UNLOCK(inp);
5321 		hold_rlock = 0;
5322 		goto restart;
5323 	}
5324 	if ((control->length == 0) &&
5325 	    (control->do_not_ref_stcb)) {
5326 		/*
5327 		 * Clean-up code for freeing an assoc that left behind a
5328 		 * pdapi... maybe a peer in EEOR mode that just closed after
5329 		 * sending and never indicated an EOR.
5330 		 */
5331 		if (hold_rlock == 0) {
5332 			hold_rlock = 1;
5333 			SCTP_INP_READ_LOCK(inp);
5334 		}
5335 		control->held_length = 0;
5336 		if (control->data) {
5337 			/* Hmm, there is data here... fix up the length */
5338 			struct mbuf *m_tmp;
5339 			int cnt = 0;
5340 
5341 			m_tmp = control->data;
5342 			while (m_tmp) {
5343 				cnt += SCTP_BUF_LEN(m_tmp);
5344 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5345 					control->tail_mbuf = m_tmp;
5346 					control->end_added = 1;
5347 				}
5348 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5349 			}
5350 			control->length = cnt;
5351 		} else {
5352 			/* remove it */
5353 			TAILQ_REMOVE(&inp->read_queue, control, next);
5354 			/* Add back any hidden data */
5355 			sctp_free_remote_addr(control->whoFrom);
5356 			sctp_free_a_readq(stcb, control);
5357 		}
5358 		if (hold_rlock) {
5359 			hold_rlock = 0;
5360 			SCTP_INP_READ_UNLOCK(inp);
5361 		}
5362 		goto restart;
5363 	}
5364 	if ((control->length == 0) &&
5365 	    (control->end_added == 1)) {
5366 		/*
5367 		 * Do we also need to check for (control->pdapi_aborted ==
5368 		 * 1)?
5369 		 */
5370 		if (hold_rlock == 0) {
5371 			hold_rlock = 1;
5372 			SCTP_INP_READ_LOCK(inp);
5373 		}
5374 		TAILQ_REMOVE(&inp->read_queue, control, next);
5375 		if (control->data) {
5376 #ifdef INVARIANTS
5377 			panic("control->data not null but control->length == 0");
5378 #else
5379 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5380 			sctp_m_freem(control->data);
5381 			control->data = NULL;
5382 #endif
5383 		}
5384 		if (control->aux_data) {
5385 			sctp_m_free(control->aux_data);
5386 			control->aux_data = NULL;
5387 		}
5388 #ifdef INVARIANTS
5389 		if (control->on_strm_q) {
5390 			panic("About to free ctl:%p so:%p and its in %d",
5391 			    control, so, control->on_strm_q);
5392 		}
5393 #endif
5394 		sctp_free_remote_addr(control->whoFrom);
5395 		sctp_free_a_readq(stcb, control);
5396 		if (hold_rlock) {
5397 			hold_rlock = 0;
5398 			SCTP_INP_READ_UNLOCK(inp);
5399 		}
5400 		goto restart;
5401 	}
5402 	if (control->length == 0) {
5403 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5404 		    (filling_sinfo)) {
5405 			/* find a more suitable one than this */
5406 			ctl = TAILQ_NEXT(control, next);
5407 			while (ctl) {
5408 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5409 				    (ctl->some_taken ||
5410 				    (ctl->spec_flags & M_NOTIFICATION) ||
5411 				    ((ctl->do_not_ref_stcb == 0) &&
5412 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5413 				    ) {
5414 					/*-
5415 					 * If we have a different TCB next, and there is data
5416 					 * present, and either we have already taken some (pdapi) OR we can
5417 					 * ref the tcb and no delivery has started on this stream, we
5418 					 * take it. Note we allow a notification on a different
5419 					 * assoc to be delivered.
5420 					 */
5421 					control = ctl;
5422 					goto found_one;
5423 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5424 					    (ctl->length) &&
5425 					    ((ctl->some_taken) ||
5426 					    ((ctl->do_not_ref_stcb == 0) &&
5427 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5428 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5429 					/*-
5430 					 * If we have the same tcb, and there is data present, and we
5431 					 * have the strm interleave feature present, then if we have
5432 					 * taken some (pdapi) or we can refer to that tcb AND we have
5433 					 * not started a delivery for this stream, we can take it.
5434 					 * Note we do NOT allow a notification on the same assoc to
5435 					 * be delivered.
5436 					 */
5437 					control = ctl;
5438 					goto found_one;
5439 				}
5440 				ctl = TAILQ_NEXT(ctl, next);
5441 			}
5442 		}
5443 		/*
5444 		 * if we reach here, no suitable replacement is available
5445 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5446 		 * into our held count, and it's time to sleep again.
5447 		 */
5448 		held_length = so->so_rcv.sb_cc;
5449 		control->held_length = so->so_rcv.sb_cc;
5450 		goto restart;
5451 	}
5452 	/* Clear the held length since there is something to read */
5453 	control->held_length = 0;
5454 found_one:
5455 	/*
5456 	 * If we reach here, control has some data for us to read off.
5457 	 * Note that stcb COULD be NULL.
5458 	 */
5459 	if (hold_rlock == 0) {
5460 		hold_rlock = 1;
5461 		SCTP_INP_READ_LOCK(inp);
5462 	}
5463 	control->some_taken++;
5464 	stcb = control->stcb;
5465 	if (stcb) {
5466 		if ((control->do_not_ref_stcb == 0) &&
5467 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5468 			if (freecnt_applied == 0)
5469 				stcb = NULL;
5470 		} else if (control->do_not_ref_stcb == 0) {
5471 			/* you can't free it on me please */
5472 			/*
5473 			 * The lock on the socket buffer protects us so the
5474 			 * free code will stop. But since we used the
5475 			 * socketbuf lock and the sender uses the tcb_lock
5476 			 * to increment, we need to use the atomic add to
5477 			 * the refcnt
5478 			 */
5479 			if (freecnt_applied) {
5480 #ifdef INVARIANTS
5481 				panic("refcnt already incremented");
5482 #else
5483 				SCTP_PRINTF("refcnt already incremented?\n");
5484 #endif
5485 			} else {
5486 				atomic_add_int(&stcb->asoc.refcnt, 1);
5487 				freecnt_applied = 1;
5488 			}
5489 			/*
5490 			 * Set up to remember how much we have not yet told
5491 			 * the peer our rwnd has opened up. Note we grab the
5492 			 * value from the tcb from last time. Note too that
5493 			 * sack sending clears this when a sack is sent,
5494 			 * which is fine. Once we hit the rwnd_req, we then
5495 			 * will go to sctp_user_rcvd(), which will not
5496 			 * lock until it KNOWS it MUST send a WUP-SACK.
5497 			 */
5498 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5499 			stcb->freed_by_sorcv_sincelast = 0;
5500 		}
5501 	}
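	/*
	 * Mark this stream as having a delivery in progress (unless this is
	 * a notification); the frag-interleave selection logic above looks
	 * at delivery_started when deciding whether to switch to another
	 * control.
	 */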
5502 	if (stcb &&
5503 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5504 	    control->do_not_ref_stcb == 0) {
5505 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5506 	}
5507 	/* First lets get off the sinfo and sockaddr info */
5508 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5509 		sinfo->sinfo_stream = control->sinfo_stream;
5510 		sinfo->sinfo_ssn = (uint16_t)control->mid;
5511 		sinfo->sinfo_flags = control->sinfo_flags;
5512 		sinfo->sinfo_ppid = control->sinfo_ppid;
5513 		sinfo->sinfo_context = control->sinfo_context;
5514 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5515 		sinfo->sinfo_tsn = control->sinfo_tsn;
5516 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5517 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5518 		nxt = TAILQ_NEXT(control, next);
5519 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5520 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5521 			struct sctp_extrcvinfo *s_extra;
5522 
5523 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5524 			if ((nxt) &&
5525 			    (nxt->length)) {
5526 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5527 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5528 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5529 				}
5530 				if (nxt->spec_flags & M_NOTIFICATION) {
5531 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5532 				}
5533 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5534 				s_extra->serinfo_next_length = nxt->length;
5535 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5536 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5537 				if (nxt->tail_mbuf != NULL) {
5538 					if (nxt->end_added) {
5539 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5540 					}
5541 				}
5542 			} else {
5543 				/*
5544 				 * we explicitly zero these, since the memcpy
5545 				 * may have picked up other things beyond the
5546 				 * older sinfo_ fields that are on the control
5547 				 * structure :-D
5548 				 */
5549 				nxt = NULL;
5550 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5551 				s_extra->serinfo_next_aid = 0;
5552 				s_extra->serinfo_next_length = 0;
5553 				s_extra->serinfo_next_ppid = 0;
5554 				s_extra->serinfo_next_stream = 0;
5555 			}
5556 		}
5557 		/*
5558 		 * update off the real current cum-ack, if we have an stcb.
5559 		 */
5560 		if ((control->do_not_ref_stcb == 0) && stcb)
5561 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5562 		/*
5563 		 * mask off the high bits, we keep the actual chunk bits in
5564 		 * there.
5565 		 */
5566 		sinfo->sinfo_flags &= 0x00ff;
5567 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5568 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5569 		}
5570 	}
5571 #ifdef SCTP_ASOCLOG_OF_TSNS
5572 	{
5573 		int index, newindex;
5574 		struct sctp_pcbtsn_rlog *entry;
5575 
5576 		do {
5577 			index = inp->readlog_index;
5578 			newindex = index + 1;
5579 			if (newindex >= SCTP_READ_LOG_SIZE) {
5580 				newindex = 0;
5581 			}
5582 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5583 		entry = &inp->readlog[index];
5584 		entry->vtag = control->sinfo_assoc_id;
5585 		entry->strm = control->sinfo_stream;
5586 		entry->seq = (uint16_t)control->mid;
5587 		entry->sz = control->length;
5588 		entry->flgs = control->sinfo_flags;
5589 	}
5590 #endif
5591 	if ((fromlen > 0) && (from != NULL)) {
5592 		union sctp_sockstore store;
5593 		size_t len;
5594 
5595 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5596 #ifdef INET6
5597 		case AF_INET6:
5598 			len = sizeof(struct sockaddr_in6);
5599 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5600 			store.sin6.sin6_port = control->port_from;
5601 			break;
5602 #endif
5603 #ifdef INET
5604 		case AF_INET:
5605 #ifdef INET6
5606 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5607 				len = sizeof(struct sockaddr_in6);
5608 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5609 				    &store.sin6);
5610 				store.sin6.sin6_port = control->port_from;
5611 			} else {
5612 				len = sizeof(struct sockaddr_in);
5613 				store.sin = control->whoFrom->ro._l_addr.sin;
5614 				store.sin.sin_port = control->port_from;
5615 			}
5616 #else
5617 			len = sizeof(struct sockaddr_in);
5618 			store.sin = control->whoFrom->ro._l_addr.sin;
5619 			store.sin.sin_port = control->port_from;
5620 #endif
5621 			break;
5622 #endif
5623 		default:
5624 			len = 0;
5625 			break;
5626 		}
5627 		memcpy(from, &store, min((size_t)fromlen, len));
5628 #ifdef INET6
5629 		{
5630 			struct sockaddr_in6 lsa6, *from6;
5631 
5632 			from6 = (struct sockaddr_in6 *)from;
5633 			sctp_recover_scope_mac(from6, (&lsa6));
5634 		}
5635 #endif
5636 	}
5637 	if (hold_rlock) {
5638 		SCTP_INP_READ_UNLOCK(inp);
5639 		hold_rlock = 0;
5640 	}
5641 	if (hold_sblock) {
5642 		SOCKBUF_UNLOCK(&so->so_rcv);
5643 		hold_sblock = 0;
5644 	}
5645 	/* now copy out what data we can */
5646 	if (mp == NULL) {
5647 		/* copy out each mbuf in the chain up to length */
5648 get_more_data:
5649 		m = control->data;
5650 		while (m) {
5651 			/* Move out all we can */
5652 			cp_len = (int)uio->uio_resid;
5653 			my_len = (int)SCTP_BUF_LEN(m);
5654 			if (cp_len > my_len) {
5655 				/* not enough in this buf */
5656 				cp_len = my_len;
5657 			}
5658 			if (hold_rlock) {
5659 				SCTP_INP_READ_UNLOCK(inp);
5660 				hold_rlock = 0;
5661 			}
5662 			if (cp_len > 0)
5663 				error = uiomove(mtod(m, char *), cp_len, uio);
5664 			/* re-read */
5665 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5666 				goto release;
5667 			}
5668 			if ((control->do_not_ref_stcb == 0) && stcb &&
5669 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5670 				no_rcv_needed = 1;
5671 			}
5672 			if (error) {
5673 				/* error we are out of here */
5674 				goto release;
5675 			}
5676 			SCTP_INP_READ_LOCK(inp);
5677 			hold_rlock = 1;
5678 			if (cp_len == SCTP_BUF_LEN(m)) {
5679 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5680 				    (control->end_added)) {
5681 					out_flags |= MSG_EOR;
5682 					if ((control->do_not_ref_stcb == 0) &&
5683 					    (control->stcb != NULL) &&
5684 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5685 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5686 				}
5687 				if (control->spec_flags & M_NOTIFICATION) {
5688 					out_flags |= MSG_NOTIFICATION;
5689 				}
5690 				/* we ate up the mbuf */
5691 				if (in_flags & MSG_PEEK) {
5692 					/* just looking */
5693 					m = SCTP_BUF_NEXT(m);
5694 					copied_so_far += cp_len;
5695 				} else {
5696 					/* dispose of the mbuf */
5697 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5698 						sctp_sblog(&so->so_rcv,
5699 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5700 					}
5701 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5702 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5703 						sctp_sblog(&so->so_rcv,
5704 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5705 					}
5706 					copied_so_far += cp_len;
5707 					freed_so_far += cp_len;
5708 					freed_so_far += MSIZE;
5709 					atomic_subtract_int(&control->length, cp_len);
5710 					control->data = sctp_m_free(m);
5711 					m = control->data;
5712 					/*
5713 					 * been through it all; we must hold the sb
5714 					 * lock, so it is ok to null the tail
5715 					 */
5716 					if (control->data == NULL) {
5717 #ifdef INVARIANTS
5718 						if ((control->end_added == 0) ||
5719 						    (TAILQ_NEXT(control, next) == NULL)) {
5720 							/*
5721 							 * If the end is not
5722 							 * added, OR the
5723 							 * next is NOT null
5724 							 * we MUST have the
5725 							 * lock.
5726 							 */
5727 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5728 								panic("Hmm we don't own the lock?");
5729 							}
5730 						}
5731 #endif
5732 						control->tail_mbuf = NULL;
5733 #ifdef INVARIANTS
5734 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5735 							panic("end_added, nothing left and no MSG_EOR");
5736 						}
5737 #endif
5738 					}
5739 				}
5740 			} else {
5741 				/* Do we need to trim the mbuf? */
5742 				if (control->spec_flags & M_NOTIFICATION) {
5743 					out_flags |= MSG_NOTIFICATION;
5744 				}
5745 				if ((in_flags & MSG_PEEK) == 0) {
5746 					SCTP_BUF_RESV_UF(m, cp_len);
5747 					SCTP_BUF_LEN(m) -= cp_len;
5748 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5749 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5750 					}
5751 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5752 					if ((control->do_not_ref_stcb == 0) &&
5753 					    stcb) {
5754 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5755 					}
5756 					copied_so_far += cp_len;
5757 					freed_so_far += cp_len;
5758 					freed_so_far += MSIZE;
5759 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5760 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5761 						    SCTP_LOG_SBRESULT, 0);
5762 					}
5763 					atomic_subtract_int(&control->length, cp_len);
5764 				} else {
5765 					copied_so_far += cp_len;
5766 				}
5767 			}
5768 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5769 				break;
5770 			}
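			/*
			 * Not peeking and we have freed at least rwnd_req
			 * bytes: give sctp_user_rcvd() a chance to send a
			 * window-update SACK before we continue copying.
			 */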
5771 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5772 			    (control->do_not_ref_stcb == 0) &&
5773 			    (freed_so_far >= rwnd_req)) {
5774 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5775 			}
5776 		}		/* end while(m) */
5777 		/*
5778 		 * At this point we have looked at it all and we either have
5779 		 * a MSG_EOR, or we read all the user wants... <OR>
5780 		 * control->length == 0.
5781 		 */
5782 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5783 			/* we are done with this control */
5784 			if (control->length == 0) {
5785 				if (control->data) {
5786 #ifdef INVARIANTS
5787 					panic("control->data not null at read eor?");
5788 #else
5789 					SCTP_PRINTF("Strange, data left in the control buffer .. invariants would panic?\n");
5790 					sctp_m_freem(control->data);
5791 					control->data = NULL;
5792 #endif
5793 				}
5794 		done_with_control:
5795 				if (hold_rlock == 0) {
5796 					SCTP_INP_READ_LOCK(inp);
5797 					hold_rlock = 1;
5798 				}
5799 				TAILQ_REMOVE(&inp->read_queue, control, next);
5800 				/* Add back any hidden data */
5801 				if (control->held_length) {
5802 					held_length = 0;
5803 					control->held_length = 0;
5804 					wakeup_read_socket = 1;
5805 				}
5806 				if (control->aux_data) {
5807 					sctp_m_free(control->aux_data);
5808 					control->aux_data = NULL;
5809 				}
5810 				no_rcv_needed = control->do_not_ref_stcb;
5811 				sctp_free_remote_addr(control->whoFrom);
5812 				control->data = NULL;
5813 #ifdef INVARIANTS
5814 				if (control->on_strm_q) {
5815 					panic("About to free ctl:%p so:%p and its in %d",
5816 					    control, so, control->on_strm_q);
5817 				}
5818 #endif
5819 				sctp_free_a_readq(stcb, control);
5820 				control = NULL;
5821 				if ((freed_so_far >= rwnd_req) &&
5822 				    (no_rcv_needed == 0))
5823 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5824 
5825 			} else {
5826 				/*
5827 				 * The user did not read all of this
5828 				 * message, turn off the returned MSG_EOR
5829 				 * since we are leaving more behind on the
5830 				 * control to read.
5831 				 */
5832 #ifdef INVARIANTS
5833 				if (control->end_added &&
5834 				    (control->data == NULL) &&
5835 				    (control->tail_mbuf == NULL)) {
5836 					panic("Gak, control->length is corrupt?");
5837 				}
5838 #endif
5839 				no_rcv_needed = control->do_not_ref_stcb;
5840 				out_flags &= ~MSG_EOR;
5841 			}
5842 		}
5843 		if (out_flags & MSG_EOR) {
5844 			goto release;
5845 		}
5846 		if ((uio->uio_resid == 0) ||
5847 		    ((in_eeor_mode) &&
5848 		    (copied_so_far >= (uint32_t)max(so->so_rcv.sb_lowat, 1)))) {
5849 			goto release;
5850 		}
5851 		/*
5852 		 * If I hit here the receiver wants more and this message is
5853 		 * NOT done (pd-api). So two questions: can we block? If not,
5854 		 * we are done. Did the user NOT set MSG_WAITALL?
5855 		 */
5856 		if (block_allowed == 0) {
5857 			goto release;
5858 		}
5859 		/*
5860 		 * We need to wait for more data. A few things to note: - We
5861 		 * don't sbunlock() so we don't get someone else reading. - We
5862 		 * must be sure to account for the case where what is added
5863 		 * is NOT to our control when we wake up.
5864 		 */
5865 
5866 		/*
5867 		 * Do we need to tell the transport a rwnd update might be
5868 		 * needed before we go to sleep?
5869 		 */
5870 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5871 		    ((freed_so_far >= rwnd_req) &&
5872 		    (control->do_not_ref_stcb == 0) &&
5873 		    (no_rcv_needed == 0))) {
5874 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5875 		}
5876 wait_some_more:
5877 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5878 			goto release;
5879 		}
5880 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5881 			goto release;
5882 
5883 		if (hold_rlock == 1) {
5884 			SCTP_INP_READ_UNLOCK(inp);
5885 			hold_rlock = 0;
5886 		}
5887 		if (hold_sblock == 0) {
5888 			SOCKBUF_LOCK(&so->so_rcv);
5889 			hold_sblock = 1;
5890 		}
5891 		if ((copied_so_far) && (control->length == 0) &&
5892 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5893 			goto release;
5894 		}
5895 		if (so->so_rcv.sb_cc <= control->held_length) {
5896 			error = sbwait(&so->so_rcv);
5897 			if (error) {
5898 				goto release;
5899 			}
5900 			control->held_length = 0;
5901 		}
5902 		if (hold_sblock) {
5903 			SOCKBUF_UNLOCK(&so->so_rcv);
5904 			hold_sblock = 0;
5905 		}
5906 		if (control->length == 0) {
5907 			/* still nothing here */
5908 			if (control->end_added == 1) {
5909 				/* he aborted, or is done, i.e. did a shutdown */
5910 				out_flags |= MSG_EOR;
5911 				if (control->pdapi_aborted) {
5912 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5913 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5914 
5915 					out_flags |= MSG_TRUNC;
5916 				} else {
5917 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5918 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5919 				}
5920 				goto done_with_control;
5921 			}
5922 			if (so->so_rcv.sb_cc > held_length) {
5923 				control->held_length = so->so_rcv.sb_cc;
5924 				held_length = 0;
5925 			}
5926 			goto wait_some_more;
5927 		} else if (control->data == NULL) {
5928 			/*
5929 			 * we must re-sync since data is probably being
5930 			 * added
5931 			 */
5932 			SCTP_INP_READ_LOCK(inp);
5933 			if ((control->length > 0) && (control->data == NULL)) {
5934 				/*
5935 				 * big trouble... we have the lock and it's
5936 				 * corrupt?
5937 				 */
5938 #ifdef INVARIANTS
5939 				panic("Impossible data==NULL length !=0");
5940 #endif
5941 				out_flags |= MSG_EOR;
5942 				out_flags |= MSG_TRUNC;
5943 				control->length = 0;
5944 				SCTP_INP_READ_UNLOCK(inp);
5945 				goto done_with_control;
5946 			}
5947 			SCTP_INP_READ_UNLOCK(inp);
5948 			/* We will fall around to get more data */
5949 		}
5950 		goto get_more_data;
5951 	} else {
5952 		/*-
5953 		 * Give caller back the mbuf chain,
5954 		 * store in uio_resid the length
5955 		 */
5956 		wakeup_read_socket = 0;
5957 		if ((control->end_added == 0) ||
5958 		    (TAILQ_NEXT(control, next) == NULL)) {
5959 			/* Need to get rlock */
5960 			if (hold_rlock == 0) {
5961 				SCTP_INP_READ_LOCK(inp);
5962 				hold_rlock = 1;
5963 			}
5964 		}
5965 		if (control->end_added) {
5966 			out_flags |= MSG_EOR;
5967 			if ((control->do_not_ref_stcb == 0) &&
5968 			    (control->stcb != NULL) &&
5969 			    ((control->spec_flags & M_NOTIFICATION) == 0))
5970 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5971 		}
5972 		if (control->spec_flags & M_NOTIFICATION) {
5973 			out_flags |= MSG_NOTIFICATION;
5974 		}
5975 		uio->uio_resid = control->length;
5976 		*mp = control->data;
5977 		m = control->data;
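		/*
		 * The whole chain is being handed to the caller, so account
		 * each mbuf out of the socket buffer here and track how much
		 * was freed for the rwnd update done at release time.
		 */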
5978 		while (m) {
5979 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5980 				sctp_sblog(&so->so_rcv,
5981 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5982 			}
5983 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5984 			freed_so_far += SCTP_BUF_LEN(m);
5985 			freed_so_far += MSIZE;
5986 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5987 				sctp_sblog(&so->so_rcv,
5988 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5989 			}
5990 			m = SCTP_BUF_NEXT(m);
5991 		}
5992 		control->data = control->tail_mbuf = NULL;
5993 		control->length = 0;
5994 		if (out_flags & MSG_EOR) {
5995 			/* Done with this control */
5996 			goto done_with_control;
5997 		}
5998 	}
5999 release:
6000 	if (hold_rlock == 1) {
6001 		SCTP_INP_READ_UNLOCK(inp);
6002 		hold_rlock = 0;
6003 	}
6004 	if (hold_sblock == 1) {
6005 		SOCKBUF_UNLOCK(&so->so_rcv);
6006 		hold_sblock = 0;
6007 	}
6008 	sbunlock(&so->so_rcv);
6009 	sockbuf_lock = 0;
6010 
6011 release_unlocked:
6012 	if (hold_sblock) {
6013 		SOCKBUF_UNLOCK(&so->so_rcv);
6014 		hold_sblock = 0;
6015 	}
6016 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6017 		if ((freed_so_far >= rwnd_req) &&
6018 		    (control && (control->do_not_ref_stcb == 0)) &&
6019 		    (no_rcv_needed == 0))
6020 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6021 	}
6022 out:
6023 	if (msg_flags) {
6024 		*msg_flags = out_flags;
6025 	}
6026 	if (((out_flags & MSG_EOR) == 0) &&
6027 	    ((in_flags & MSG_PEEK) == 0) &&
6028 	    (sinfo) &&
6029 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6030 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6031 		struct sctp_extrcvinfo *s_extra;
6032 
6033 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6034 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6035 	}
6036 	if (hold_rlock == 1) {
6037 		SCTP_INP_READ_UNLOCK(inp);
6038 	}
6039 	if (hold_sblock) {
6040 		SOCKBUF_UNLOCK(&so->so_rcv);
6041 	}
6042 	if (sockbuf_lock) {
6043 		sbunlock(&so->so_rcv);
6044 	}
6045 	if (freecnt_applied) {
6046 		/*
6047 		 * The lock on the socket buffer protects us so the free
6048 		 * code will stop. But since we used the socketbuf lock and
6049 		 * the sender uses the tcb_lock to increment, we need to use
6050 		 * the atomic add to the refcnt.
6051 		 */
6052 		if (stcb == NULL) {
6053 #ifdef INVARIANTS
6054 			panic("stcb for refcnt has gone NULL?");
6055 			goto stage_left;
6056 #else
6057 			goto stage_left;
6058 #endif
6059 		}
6060 		/* Save the value back for next time */
6061 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6062 		atomic_add_int(&stcb->asoc.refcnt, -1);
6063 	}
6064 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6065 		if (stcb) {
6066 			sctp_misc_ints(SCTP_SORECV_DONE,
6067 			    freed_so_far,
6068 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6069 			    stcb->asoc.my_rwnd,
6070 			    so->so_rcv.sb_cc);
6071 		} else {
6072 			sctp_misc_ints(SCTP_SORECV_DONE,
6073 			    freed_so_far,
6074 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6075 			    0,
6076 			    so->so_rcv.sb_cc);
6077 		}
6078 	}
6079 stage_left:
6080 	if (wakeup_read_socket) {
6081 		sctp_sorwakeup(inp, so);
6082 	}
6083 	return (error);
6084 }
6085 
6086 
6087 #ifdef SCTP_MBUF_LOGGING
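/*
 * Logging wrappers around m_free()/m_freem(): when mbuf logging is enabled
 * via the sysctl, every mbuf freed by SCTP is recorded (SCTP_MBUF_IFREE).
 */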
6088 struct mbuf *
6089 sctp_m_free(struct mbuf *m)
6090 {
6091 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6092 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6093 	}
6094 	return (m_free(m));
6095 }
6096 
6097 void
6098 sctp_m_freem(struct mbuf *mb)
6099 {
6100 	while (mb != NULL)
6101 		mb = sctp_m_free(mb);
6102 }
6103 
6104 #endif
6105 
6106 int
6107 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6108 {
6109 	/*
6110 	 * Given a local address. For all associations that holds the
6111 	 * address, request a peer-set-primary.
6112 	 */
6113 	struct sctp_ifa *ifa;
6114 	struct sctp_laddr *wi;
6115 
6116 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6117 	if (ifa == NULL) {
6118 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6119 		return (EADDRNOTAVAIL);
6120 	}
6121 	/*
6122 	 * Now that we have the ifa we must awaken the iterator with this
6123 	 * message.
6124 	 */
6125 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6126 	if (wi == NULL) {
6127 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6128 		return (ENOMEM);
6129 	}
6130 	/* Now incr the count and init the wi structure */
6131 	SCTP_INCR_LADDR_COUNT();
6132 	memset(wi, 0, sizeof(*wi));
6133 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6134 	wi->ifa = ifa;
6135 	wi->action = SCTP_SET_PRIM_ADDR;
6136 	atomic_add_int(&ifa->refcount, 1);
6137 
6138 	/* Now add it to the work queue */
6139 	SCTP_WQ_ADDR_LOCK();
6140 	/*
6141 	 * Should this really be a tailq? As it is we will process the
6142 	 * newest first :-0
6143 	 */
6144 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6145 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6146 	    (struct sctp_inpcb *)NULL,
6147 	    (struct sctp_tcb *)NULL,
6148 	    (struct sctp_nets *)NULL);
6149 	SCTP_WQ_ADDR_UNLOCK();
6150 	return (0);
6151 }
6152 
6153 
6154 int
6155 sctp_soreceive(struct socket *so,
6156     struct sockaddr **psa,
6157     struct uio *uio,
6158     struct mbuf **mp0,
6159     struct mbuf **controlp,
6160     int *flagsp)
6161 {
6162 	int error, fromlen;
6163 	uint8_t sockbuf[256];
6164 	struct sockaddr *from;
6165 	struct sctp_extrcvinfo sinfo;
6166 	int filling_sinfo = 1;
6167 	int flags;
6168 	struct sctp_inpcb *inp;
6169 
6170 	inp = (struct sctp_inpcb *)so->so_pcb;
6171 	/* pick up the assoc we are reading from */
6172 	if (inp == NULL) {
6173 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6174 		return (EINVAL);
6175 	}
6176 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6177 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6178 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6179 	    (controlp == NULL)) {
6180 		/* user does not want the sndrcv ctl */
6181 		filling_sinfo = 0;
6182 	}
6183 	if (psa) {
6184 		from = (struct sockaddr *)sockbuf;
6185 		fromlen = sizeof(sockbuf);
6186 		from->sa_len = 0;
6187 	} else {
6188 		from = NULL;
6189 		fromlen = 0;
6190 	}
6191 
6192 	if (filling_sinfo) {
6193 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6194 	}
6195 	if (flagsp != NULL) {
6196 		flags = *flagsp;
6197 	} else {
6198 		flags = 0;
6199 	}
6200 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags,
6201 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6202 	if (flagsp != NULL) {
6203 		*flagsp = flags;
6204 	}
6205 	if (controlp != NULL) {
6206 		/* copy back the sinfo in a CMSG format */
6207 		if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) {
6208 			*controlp = sctp_build_ctl_nchunk(inp,
6209 			    (struct sctp_sndrcvinfo *)&sinfo);
6210 		} else {
6211 			*controlp = NULL;
6212 		}
6213 	}
6214 	if (psa) {
6215 		/* copy back the address info */
6216 		if (from && from->sa_len) {
6217 			*psa = sodupsockaddr(from, M_NOWAIT);
6218 		} else {
6219 			*psa = NULL;
6220 		}
6221 	}
6222 	return (error);
6223 }
6224 
6225 
6226 
6227 
6228 
6229 int
6230 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6231     int totaddr, int *error)
6232 {
6233 	int added = 0;
6234 	int i;
6235 	struct sctp_inpcb *inp;
6236 	struct sockaddr *sa;
6237 	size_t incr = 0;
6238 #ifdef INET
6239 	struct sockaddr_in *sin;
6240 #endif
6241 #ifdef INET6
6242 	struct sockaddr_in6 *sin6;
6243 #endif
6244 
6245 	sa = addr;
6246 	inp = stcb->sctp_ep;
6247 	*error = 0;
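	/*
	 * Walk the packed list of sockaddrs, validating each address and
	 * adding it to the association; 'incr' advances by the size of the
	 * sockaddr for the current address family.
	 */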
6248 	for (i = 0; i < totaddr; i++) {
6249 		switch (sa->sa_family) {
6250 #ifdef INET
6251 		case AF_INET:
6252 			incr = sizeof(struct sockaddr_in);
6253 			sin = (struct sockaddr_in *)sa;
6254 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6255 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6256 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6257 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6258 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6259 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
6260 				*error = EINVAL;
6261 				goto out_now;
6262 			}
6263 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6264 			    SCTP_DONOT_SETSCOPE,
6265 			    SCTP_ADDR_IS_CONFIRMED)) {
6266 				/* assoc is gone, no unlock needed */
6267 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6268 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6269 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
6270 				*error = ENOBUFS;
6271 				goto out_now;
6272 			}
6273 			added++;
6274 			break;
6275 #endif
6276 #ifdef INET6
6277 		case AF_INET6:
6278 			incr = sizeof(struct sockaddr_in6);
6279 			sin6 = (struct sockaddr_in6 *)sa;
6280 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6281 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6282 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6283 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6284 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
6285 				*error = EINVAL;
6286 				goto out_now;
6287 			}
6288 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6289 			    SCTP_DONOT_SETSCOPE,
6290 			    SCTP_ADDR_IS_CONFIRMED)) {
6291 				/* assoc is gone, no unlock needed */
6292 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6293 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6294 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
6295 				*error = ENOBUFS;
6296 				goto out_now;
6297 			}
6298 			added++;
6299 			break;
6300 #endif
6301 		default:
6302 			break;
6303 		}
6304 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6305 	}
6306 out_now:
6307 	return (added);
6308 }
6309 
6310 struct sctp_tcb *
6311 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6312     unsigned int *totaddr,
6313     unsigned int *num_v4, unsigned int *num_v6, int *error,
6314     unsigned int limit, int *bad_addr)
6315 {
6316 	struct sockaddr *sa;
6317 	struct sctp_tcb *stcb = NULL;
6318 	unsigned int incr, at, i;
6319 
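	/*
	 * Walk the packed address list: count the IPv4/IPv6 addresses,
	 * validate their lengths, and return any association on this
	 * endpoint that already matches one of the addresses.
	 */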
6320 	at = 0;
6321 	sa = addr;
6322 	*error = *num_v6 = *num_v4 = 0;
6323 	/* account and validate addresses */
6324 	for (i = 0; i < *totaddr; i++) {
6325 		switch (sa->sa_family) {
6326 #ifdef INET
6327 		case AF_INET:
6328 			incr = (unsigned int)sizeof(struct sockaddr_in);
6329 			if (sa->sa_len != incr) {
6330 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6331 				*error = EINVAL;
6332 				*bad_addr = 1;
6333 				return (NULL);
6334 			}
6335 			(*num_v4) += 1;
6336 			break;
6337 #endif
6338 #ifdef INET6
6339 		case AF_INET6:
6340 			{
6341 				struct sockaddr_in6 *sin6;
6342 
6343 				sin6 = (struct sockaddr_in6 *)sa;
6344 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6345 					/* Must be non-mapped for connectx */
6346 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6347 					*error = EINVAL;
6348 					*bad_addr = 1;
6349 					return (NULL);
6350 				}
6351 				incr = (unsigned int)sizeof(struct sockaddr_in6);
6352 				if (sa->sa_len != incr) {
6353 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6354 					*error = EINVAL;
6355 					*bad_addr = 1;
6356 					return (NULL);
6357 				}
6358 				(*num_v6) += 1;
6359 				break;
6360 			}
6361 #endif
6362 		default:
6363 			*totaddr = i;
6364 			incr = 0;
6365 			/* we are done */
6366 			break;
6367 		}
6368 		if (i == *totaddr) {
6369 			break;
6370 		}
6371 		SCTP_INP_INCR_REF(inp);
6372 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6373 		if (stcb != NULL) {
6374 			/* Already have or am bringing up an association */
6375 			return (stcb);
6376 		} else {
6377 			SCTP_INP_DECR_REF(inp);
6378 		}
6379 		if ((at + incr) > limit) {
6380 			*totaddr = i;
6381 			break;
6382 		}
6383 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6384 	}
6385 	return ((struct sctp_tcb *)NULL);
6386 }
6387 
6388 /*
6389  * sctp_bindx(ADD) for one address.
6390  * assumes all arguments are valid/checked by caller.
6391  */
6392 void
6393 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6394     struct sockaddr *sa, sctp_assoc_t assoc_id,
6395     uint32_t vrf_id, int *error, void *p)
6396 {
6397 	struct sockaddr *addr_touse;
6398 #if defined(INET) && defined(INET6)
6399 	struct sockaddr_in sin;
6400 #endif
6401 
6402 	/* see if we're bound all already! */
6403 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6404 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6405 		*error = EINVAL;
6406 		return;
6407 	}
6408 	addr_touse = sa;
6409 #ifdef INET6
6410 	if (sa->sa_family == AF_INET6) {
6411 #ifdef INET
6412 		struct sockaddr_in6 *sin6;
6413 
6414 #endif
6415 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6416 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6417 			*error = EINVAL;
6418 			return;
6419 		}
6420 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6421 			/* can only bind v6 on PF_INET6 sockets */
6422 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6423 			*error = EINVAL;
6424 			return;
6425 		}
6426 #ifdef INET
6427 		sin6 = (struct sockaddr_in6 *)addr_touse;
6428 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6429 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6430 			    SCTP_IPV6_V6ONLY(inp)) {
6431 				/* can't bind v4-mapped on PF_INET sockets */
6432 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6433 				*error = EINVAL;
6434 				return;
6435 			}
6436 			in6_sin6_2_sin(&sin, sin6);
6437 			addr_touse = (struct sockaddr *)&sin;
6438 		}
6439 #endif
6440 	}
6441 #endif
6442 #ifdef INET
6443 	if (sa->sa_family == AF_INET) {
6444 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6445 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6446 			*error = EINVAL;
6447 			return;
6448 		}
6449 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6450 		    SCTP_IPV6_V6ONLY(inp)) {
6451 			/* can't bind v4 on PF_INET sockets */
6452 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6453 			*error = EINVAL;
6454 			return;
6455 		}
6456 	}
6457 #endif
6458 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6459 		if (p == NULL) {
6460 			/* Can't get proc for Net/Open BSD */
6461 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6462 			*error = EINVAL;
6463 			return;
6464 		}
6465 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6466 		return;
6467 	}
6468 	/*
6469 	 * No locks required here since bind and mgmt_ep_sa all do their own
6470 	 * locking. If we do something for the FIX: below we may need to
6471 	 * lock in that case.
6472 	 */
6473 	if (assoc_id == 0) {
6474 		/* add the address */
6475 		struct sctp_inpcb *lep;
6476 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6477 
6478 		/* validate the incoming port */
6479 		if ((lsin->sin_port != 0) &&
6480 		    (lsin->sin_port != inp->sctp_lport)) {
6481 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6482 			*error = EINVAL;
6483 			return;
6484 		} else {
6485 			/* user specified 0 port, set it to existing port */
6486 			lsin->sin_port = inp->sctp_lport;
6487 		}
6488 
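		/*
		 * See if some endpoint is already bound to this address and
		 * port; sctp_pcb_findep() returns it with a reference held,
		 * which is dropped again below.
		 */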
6489 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6490 		if (lep != NULL) {
6491 			/*
6492 			 * We must decrement the refcount since we have the
6493 			 * ep already and are binding. No remove going on
6494 			 * here.
6495 			 */
6496 			SCTP_INP_DECR_REF(lep);
6497 		}
6498 		if (lep == inp) {
6499 			/* already bound to it.. ok */
6500 			return;
6501 		} else if (lep == NULL) {
6502 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6503 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6504 			    SCTP_ADD_IP_ADDRESS,
6505 			    vrf_id, NULL);
6506 		} else {
6507 			*error = EADDRINUSE;
6508 		}
6509 		if (*error)
6510 			return;
6511 	} else {
6512 		/*
6513 		 * FIX: decide whether we allow assoc based bindx
6514 		 */
6515 	}
6516 }
6517 
6518 /*
6519  * sctp_bindx(DELETE) for one address.
6520  * assumes all arguments are valid/checked by caller.
6521  */
6522 void
6523 sctp_bindx_delete_address(struct sctp_inpcb *inp,
6524     struct sockaddr *sa, sctp_assoc_t assoc_id,
6525     uint32_t vrf_id, int *error)
6526 {
6527 	struct sockaddr *addr_touse;
6528 #if defined(INET) && defined(INET6)
6529 	struct sockaddr_in sin;
6530 #endif
6531 
6532 	/* see if we're bound all already! */
6533 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6534 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6535 		*error = EINVAL;
6536 		return;
6537 	}
6538 	addr_touse = sa;
6539 #ifdef INET6
6540 	if (sa->sa_family == AF_INET6) {
6541 #ifdef INET
6542 		struct sockaddr_in6 *sin6;
6543 #endif
6544 
6545 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6546 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6547 			*error = EINVAL;
6548 			return;
6549 		}
6550 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6551 			/* can only bind v6 on PF_INET6 sockets */
6552 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6553 			*error = EINVAL;
6554 			return;
6555 		}
6556 #ifdef INET
6557 		sin6 = (struct sockaddr_in6 *)addr_touse;
6558 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6559 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6560 			    SCTP_IPV6_V6ONLY(inp)) {
6561 				/* can't bind mapped-v4 on PF_INET sockets */
6562 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6563 				*error = EINVAL;
6564 				return;
6565 			}
6566 			in6_sin6_2_sin(&sin, sin6);
6567 			addr_touse = (struct sockaddr *)&sin;
6568 		}
6569 #endif
6570 	}
6571 #endif
6572 #ifdef INET
6573 	if (sa->sa_family == AF_INET) {
6574 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6575 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6576 			*error = EINVAL;
6577 			return;
6578 		}
6579 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6580 		    SCTP_IPV6_V6ONLY(inp)) {
6581 			/* can't bind v4 on PF_INET sockets */
6582 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6583 			*error = EINVAL;
6584 			return;
6585 		}
6586 	}
6587 #endif
6588 	/*
6589 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6590 	 * below is ever changed we may need to lock before calling
6591 	 * association level binding.
6592 	 */
6593 	if (assoc_id == 0) {
6594 		/* delete the address */
6595 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6596 		    SCTP_DEL_IP_ADDRESS,
6597 		    vrf_id, NULL);
6598 	} else {
6599 		/*
6600 		 * FIX: decide whether we allow assoc based bindx
6601 		 */
6602 	}
6603 }
6604 
6605 /*
6606  * returns the valid local address count for an assoc, taking into account
6607  * all scoping rules
6608  */
6609 int
6610 sctp_local_addr_count(struct sctp_tcb *stcb)
6611 {
6612 	int loopback_scope;
6613 #if defined(INET)
6614 	int ipv4_local_scope, ipv4_addr_legal;
6615 #endif
6616 #if defined (INET6)
6617 	int local_scope, site_scope, ipv6_addr_legal;
6618 #endif
6619 	struct sctp_vrf *vrf;
6620 	struct sctp_ifn *sctp_ifn;
6621 	struct sctp_ifa *sctp_ifa;
6622 	int count = 0;
6623 
6624 	/* Turn on all the appropriate scopes */
6625 	loopback_scope = stcb->asoc.scope.loopback_scope;
6626 #if defined(INET)
6627 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
6628 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
6629 #endif
6630 #if defined(INET6)
6631 	local_scope = stcb->asoc.scope.local_scope;
6632 	site_scope = stcb->asoc.scope.site_scope;
6633 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
6634 #endif
6635 	SCTP_IPI_ADDR_RLOCK();
6636 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6637 	if (vrf == NULL) {
6638 		/* no vrf, no addresses */
6639 		SCTP_IPI_ADDR_RUNLOCK();
6640 		return (0);
6641 	}
6642 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6643 		/*
6644 		 * bound all case: go through all ifns on the vrf
6645 		 */
6646 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6647 			if ((loopback_scope == 0) &&
6648 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6649 				continue;
6650 			}
6651 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6652 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6653 					continue;
6654 				switch (sctp_ifa->address.sa.sa_family) {
6655 #ifdef INET
6656 				case AF_INET:
6657 					if (ipv4_addr_legal) {
6658 						struct sockaddr_in *sin;
6659 
6660 						sin = &sctp_ifa->address.sin;
6661 						if (sin->sin_addr.s_addr == 0) {
6662 							/*
6663 							 * skip unspecified
6664 							 * addrs
6665 							 */
6666 							continue;
6667 						}
6668 						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
6669 						    &sin->sin_addr) != 0) {
6670 							continue;
6671 						}
6672 						if ((ipv4_local_scope == 0) &&
6673 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6674 							continue;
6675 						}
6676 						/* count this one */
6677 						count++;
6678 					} else {
6679 						continue;
6680 					}
6681 					break;
6682 #endif
6683 #ifdef INET6
6684 				case AF_INET6:
6685 					if (ipv6_addr_legal) {
6686 						struct sockaddr_in6 *sin6;
6687 
6688 						sin6 = &sctp_ifa->address.sin6;
6689 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6690 							continue;
6691 						}
6692 						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
6693 						    &sin6->sin6_addr) != 0) {
6694 							continue;
6695 						}
6696 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6697 							if (local_scope == 0)
6698 								continue;
6699 							if (sin6->sin6_scope_id == 0) {
6700 								if (sa6_recoverscope(sin6) != 0)
6701 									/* bad link local address */
6710 									continue;
6711 							}
6712 						}
6713 						if ((site_scope == 0) &&
6714 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6715 							continue;
6716 						}
6717 						/* count this one */
6718 						count++;
6719 					}
6720 					break;
6721 #endif
6722 				default:
6723 					/* TSNH */
6724 					break;
6725 				}
6726 			}
6727 		}
6728 	} else {
6729 		/*
6730 		 * subset bound case
6731 		 */
6732 		struct sctp_laddr *laddr;
6733 
6734 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6735 		    sctp_nxt_addr) {
6736 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6737 				continue;
6738 			}
6739 			/* count this one */
6740 			count++;
6741 		}
6742 	}
6743 	SCTP_IPI_ADDR_RUNLOCK();
6744 	return (count);
6745 }
6746 
6747 #if defined(SCTP_LOCAL_TRACE_BUF)
6748 
6749 void
6750 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6751 {
6752 	uint32_t saveindex, newindex;
6753 
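	/*
	 * Atomically claim the next slot in the circular trace log,
	 * wrapping the index once it reaches SCTP_MAX_LOGGING_SIZE.
	 */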
6754 	do {
6755 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6756 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6757 			newindex = 1;
6758 		} else {
6759 			newindex = saveindex + 1;
6760 		}
6761 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6762 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6763 		saveindex = 0;
6764 	}
6765 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6766 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6767 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6768 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6769 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6770 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6771 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6772 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6773 }
6774 
6775 #endif
6776 static void
6777 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
6778     const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
6779 {
6780 	struct ip *iph;
6781 #ifdef INET6
6782 	struct ip6_hdr *ip6;
6783 #endif
6784 	struct mbuf *sp, *last;
6785 	struct udphdr *uhdr;
6786 	uint16_t port;
6787 
6788 	if ((m->m_flags & M_PKTHDR) == 0) {
6789 		/* Can't handle one that is not a pkt hdr */
6790 		goto out;
6791 	}
6792 	/* Pull the src port */
6793 	iph = mtod(m, struct ip *);
6794 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6795 	port = uhdr->uh_sport;
6796 	/*
6797 	 * Split out the mbuf chain. Leave the IP header in m, place the
6798 	 * rest in the sp.
6799 	 */
6800 	sp = m_split(m, off, M_NOWAIT);
6801 	if (sp == NULL) {
6802 		/* Gak, drop packet, we can't do a split */
6803 		goto out;
6804 	}
6805 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
6806 		/* Gak, packet can't have an SCTP header in it - too small */
6807 		m_freem(sp);
6808 		goto out;
6809 	}
6810 	/* Now pull up the UDP header and SCTP header together */
6811 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
6812 	if (sp == NULL) {
6813 		/* Gak pullup failed */
6814 		goto out;
6815 	}
6816 	/* Trim out the UDP header */
6817 	m_adj(sp, sizeof(struct udphdr));
6818 
6819 	/* Now reconstruct the mbuf chain */
6820 	for (last = m; last->m_next; last = last->m_next);
6821 	last->m_next = sp;
6822 	m->m_pkthdr.len += sp->m_pkthdr.len;
6823 	/*
6824 	 * The CSUM_DATA_VALID flag indicates that the HW checked the UDP
6825 	 * checksum and it was valid. Since CSUM_DATA_VALID ==
6826 	 * CSUM_SCTP_VALID this would imply that the HW also verified the
6827 	 * SCTP checksum. Therefore, clear the bit.
6828 	 */
6829 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6830 	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
6831 	    m->m_pkthdr.len,
6832 	    if_name(m->m_pkthdr.rcvif),
6833 	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
6834 	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
6835 	iph = mtod(m, struct ip *);
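	/*
	 * Shrink the IP-level payload length by the UDP header we stripped
	 * and hand the packet to the normal SCTP input path for its
	 * address family.
	 */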
6836 	switch (iph->ip_v) {
6837 #ifdef INET
6838 	case IPVERSION:
6839 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
6840 		sctp_input_with_port(m, off, port);
6841 		break;
6842 #endif
6843 #ifdef INET6
6844 	case IPV6_VERSION >> 4:
6845 		ip6 = mtod(m, struct ip6_hdr *);
6846 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
6847 		sctp6_input_with_port(&m, &off, port);
6848 		break;
6849 #endif
6850 	default:
6851 		goto out;
6852 		break;
6853 	}
6854 	return;
6855 out:
6856 	m_freem(m);
6857 }
6858 
6859 #ifdef INET
6860 static void
6861 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
6862 {
6863 	struct ip *outer_ip, *inner_ip;
6864 	struct sctphdr *sh;
6865 	struct icmp *icmp;
6866 	struct udphdr *udp;
6867 	struct sctp_inpcb *inp;
6868 	struct sctp_tcb *stcb;
6869 	struct sctp_nets *net;
6870 	struct sctp_init_chunk *ch;
6871 	struct sockaddr_in src, dst;
6872 	uint8_t type, code;
6873 
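	/*
	 * 'vip' points at the quoted inner IP header inside the ICMP error;
	 * step back from it to recover the ICMP header and the outer IP
	 * header, then make sure enough of the inner packet (UDP header
	 * plus the start of the SCTP common header) was actually quoted.
	 */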
6874 	inner_ip = (struct ip *)vip;
6875 	icmp = (struct icmp *)((caddr_t)inner_ip -
6876 	    (sizeof(struct icmp) - sizeof(struct ip)));
6877 	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
6878 	if (ntohs(outer_ip->ip_len) <
6879 	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
6880 		return;
6881 	}
6882 	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
6883 	sh = (struct sctphdr *)(udp + 1);
6884 	memset(&src, 0, sizeof(struct sockaddr_in));
6885 	src.sin_family = AF_INET;
6886 	src.sin_len = sizeof(struct sockaddr_in);
6887 	src.sin_port = sh->src_port;
6888 	src.sin_addr = inner_ip->ip_src;
6889 	memset(&dst, 0, sizeof(struct sockaddr_in));
6890 	dst.sin_family = AF_INET;
6891 	dst.sin_len = sizeof(struct sockaddr_in);
6892 	dst.sin_port = sh->dest_port;
6893 	dst.sin_addr = inner_ip->ip_dst;
6894 	/*
6895 	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
6896 	 * holds our local endpoint address. Thus we reverse the dst and the
6897 	 * src in the lookup.
6898 	 */
6899 	inp = NULL;
6900 	net = NULL;
6901 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
6902 	    (struct sockaddr *)&src,
6903 	    &inp, &net, 1,
6904 	    SCTP_DEFAULT_VRFID);
6905 	if ((stcb != NULL) &&
6906 	    (net != NULL) &&
6907 	    (inp != NULL)) {
6908 		/* Check the UDP port numbers */
6909 		if ((udp->uh_dport != net->port) ||
6910 		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
6911 			SCTP_TCB_UNLOCK(stcb);
6912 			return;
6913 		}
6914 		/* Check the verification tag */
6915 		if (ntohl(sh->v_tag) != 0) {
6916 			/*
6917 			 * This must be the verification tag used for
6918 			 * sending out packets. We don't consider packets
6919 			 * reflecting the verification tag.
6920 			 */
6921 			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
6922 				SCTP_TCB_UNLOCK(stcb);
6923 				return;
6924 			}
6925 		} else {
6926 			if (ntohs(outer_ip->ip_len) >=
6927 			    sizeof(struct ip) +
6928 			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
6929 				/*
6930 				 * In this case we can check if we got an
6931 				 * INIT chunk and if the initiate tag
6932 				 * matches.
6933 				 */
6934 				ch = (struct sctp_init_chunk *)(sh + 1);
6935 				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
6936 				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
6937 					SCTP_TCB_UNLOCK(stcb);
6938 					return;
6939 				}
6940 			} else {
6941 				SCTP_TCB_UNLOCK(stcb);
6942 				return;
6943 			}
6944 		}
6945 		type = icmp->icmp_type;
6946 		code = icmp->icmp_code;
6947 		if ((type == ICMP_UNREACH) &&
6948 		    (code == ICMP_UNREACH_PORT)) {
6949 			code = ICMP_UNREACH_PROTOCOL;
6950 		}
6951 		sctp_notify(inp, stcb, net, type, code,
6952 		    ntohs(inner_ip->ip_len),
6953 		    (uint32_t)ntohs(icmp->icmp_nextmtu));
6954 	} else {
6955 		if ((stcb == NULL) && (inp != NULL)) {
6956 			/* reduce ref-count */
6957 			SCTP_INP_WLOCK(inp);
6958 			SCTP_INP_DECR_REF(inp);
6959 			SCTP_INP_WUNLOCK(inp);
6960 		}
6961 		if (stcb) {
6962 			SCTP_TCB_UNLOCK(stcb);
6963 		}
6964 	}
6965 	return;
6966 }
6967 #endif
6968 
6969 #ifdef INET6
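/*
 * ICMPv6 error callback registered for the IPv6 UDP tunneling socket.
 */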
6970 static void
6971 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
6972 {
6973 	struct ip6ctlparam *ip6cp;
6974 	struct sctp_inpcb *inp;
6975 	struct sctp_tcb *stcb;
6976 	struct sctp_nets *net;
6977 	struct sctphdr sh;
6978 	struct udphdr udp;
6979 	struct sockaddr_in6 src, dst;
6980 	uint8_t type, code;
6981 
6982 	ip6cp = (struct ip6ctlparam *)d;
6983 	/*
6984 	 * XXX: We assume that when IPV6 is non-NULL, M and OFF are valid.
6985 	 */
6986 	if (ip6cp->ip6c_m == NULL) {
6987 		return;
6988 	}
6989 	/*
6990 	 * Check if we can safely examine the ports and the verification tag
6991 	 * of the SCTP common header.
6992 	 */
6993 	if (ip6cp->ip6c_m->m_pkthdr.len <
6994 	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
6995 		return;
6996 	}
6997 	/* Copy out the UDP header. */
6998 	memset(&udp, 0, sizeof(struct udphdr));
6999 	m_copydata(ip6cp->ip6c_m,
7000 	    ip6cp->ip6c_off,
7001 	    sizeof(struct udphdr),
7002 	    (caddr_t)&udp);
7003 	/* Copy out the port numbers and the verification tag. */
7004 	memset(&sh, 0, sizeof(struct sctphdr));
7005 	m_copydata(ip6cp->ip6c_m,
7006 	    ip6cp->ip6c_off + sizeof(struct udphdr),
7007 	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
7008 	    (caddr_t)&sh);
7009 	memset(&src, 0, sizeof(struct sockaddr_in6));
7010 	src.sin6_family = AF_INET6;
7011 	src.sin6_len = sizeof(struct sockaddr_in6);
7012 	src.sin6_port = sh.src_port;
7013 	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
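	/*
	 * Embed the scope zone of the receiving interface into the address
	 * before it is used for the association lookup.
	 */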
7014 	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7015 		return;
7016 	}
7017 	memset(&dst, 0, sizeof(struct sockaddr_in6));
7018 	dst.sin6_family = AF_INET6;
7019 	dst.sin6_len = sizeof(struct sockaddr_in6);
7020 	dst.sin6_port = sh.dest_port;
7021 	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
7022 	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7023 		return;
7024 	}
7025 	inp = NULL;
7026 	net = NULL;
7027 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7028 	    (struct sockaddr *)&src,
7029 	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
7030 	if ((stcb != NULL) &&
7031 	    (net != NULL) &&
7032 	    (inp != NULL)) {
7033 		/* Check the UDP port numbers */
7034 		if ((udp.uh_dport != net->port) ||
7035 		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7036 			SCTP_TCB_UNLOCK(stcb);
7037 			return;
7038 		}
7039 		/* Check the verification tag */
7040 		if (ntohl(sh.v_tag) != 0) {
7041 			/*
7042 			 * This must be the verification tag used for
7043 			 * sending out packets. We don't consider packets
7044 			 * reflecting the verification tag.
7045 			 */
7046 			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
7047 				SCTP_TCB_UNLOCK(stcb);
7048 				return;
7049 			}
7050 		} else {
7051 			if (ip6cp->ip6c_m->m_pkthdr.len >=
7052 			    ip6cp->ip6c_off + sizeof(struct udphdr) +
7053 			    sizeof(struct sctphdr) +
7054 			    sizeof(struct sctp_chunkhdr) +
7055 			    offsetof(struct sctp_init, a_rwnd)) {
7056 				/*
7057 				 * In this case we can check if we got an
7058 				 * INIT chunk and if the initiate tag
7059 				 * matches.
7060 				 */
7061 				uint32_t initiate_tag;
7062 				uint8_t chunk_type;
7063 
7064 				m_copydata(ip6cp->ip6c_m,
7065 				    ip6cp->ip6c_off +
7066 				    sizeof(struct udphdr) +
7067 				    sizeof(struct sctphdr),
7068 				    sizeof(uint8_t),
7069 				    (caddr_t)&chunk_type);
7070 				m_copydata(ip6cp->ip6c_m,
7071 				    ip6cp->ip6c_off +
7072 				    sizeof(struct udphdr) +
7073 				    sizeof(struct sctphdr) +
7074 				    sizeof(struct sctp_chunkhdr),
7075 				    sizeof(uint32_t),
7076 				    (caddr_t)&initiate_tag);
7077 				if ((chunk_type != SCTP_INITIATION) ||
7078 				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
7079 					SCTP_TCB_UNLOCK(stcb);
7080 					return;
7081 				}
7082 			} else {
7083 				SCTP_TCB_UNLOCK(stcb);
7084 				return;
7085 			}
7086 		}
7087 		type = ip6cp->ip6c_icmp6->icmp6_type;
7088 		code = ip6cp->ip6c_icmp6->icmp6_code;
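		/*
		 * Map a port unreachable for the encapsulating UDP port to
		 * a parameter problem indicating an unrecognized next
		 * header.
		 */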
7089 		if ((type == ICMP6_DST_UNREACH) &&
7090 		    (code == ICMP6_DST_UNREACH_NOPORT)) {
7091 			type = ICMP6_PARAM_PROB;
7092 			code = ICMP6_PARAMPROB_NEXTHEADER;
7093 		}
7094 		sctp6_notify(inp, stcb, net, type, code,
7095 		    ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
7096 	} else {
7097 		if ((stcb == NULL) && (inp != NULL)) {
7098 			/* reduce inp's ref-count */
7099 			SCTP_INP_WLOCK(inp);
7100 			SCTP_INP_DECR_REF(inp);
7101 			SCTP_INP_WUNLOCK(inp);
7102 		}
7103 		if (stcb) {
7104 			SCTP_TCB_UNLOCK(stcb);
7105 		}
7106 	}
7107 }
7108 #endif
7109 
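/*
 * Close the UDP tunneling sockets opened by sctp_over_udp_start().
 */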
7110 void
7111 sctp_over_udp_stop(void)
7112 {
7113 	/*
7114 	 * This function assumes that the sysctl caller holds
7115 	 * sctp_sysctl_info_lock() for writing.
7116 	 */
7117 #ifdef INET
7118 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7119 		soclose(SCTP_BASE_INFO(udp4_tun_socket));
7120 		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
7121 	}
7122 #endif
7123 #ifdef INET6
7124 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7125 		soclose(SCTP_BASE_INFO(udp6_tun_socket));
7126 		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
7127 	}
7128 #endif
7129 }
7130 
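/*
 * Open a kernel UDP socket for each compiled-in address family, register
 * the tunneling and ICMP callbacks on it, and bind it to the configured
 * encapsulation port.
 */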
7131 int
7132 sctp_over_udp_start(void)
7133 {
7134 	uint16_t port;
7135 	int ret;
7136 #ifdef INET
7137 	struct sockaddr_in sin;
7138 #endif
7139 #ifdef INET6
7140 	struct sockaddr_in6 sin6;
7141 #endif
7142 	/*
7143 	 * This function assumes that the sysctl caller holds
7144 	 * sctp_sysctl_info_lock() for writing.
7145 	 */
7146 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7147 	if (port == 0) {
7148 		/* Must have a port set */
7149 		return (EINVAL);
7150 	}
7151 #ifdef INET
7152 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7153 		/* Already running -- must stop first */
7154 		return (EALREADY);
7155 	}
7156 #endif
7157 #ifdef INET6
7158 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7159 		/* Already running -- must stop first */
7160 		return (EALREADY);
7161 	}
7162 #endif
7163 #ifdef INET
7164 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7165 	    SOCK_DGRAM, IPPROTO_UDP,
7166 	    curthread->td_ucred, curthread))) {
7167 		sctp_over_udp_stop();
7168 		return (ret);
7169 	}
7170 	/* Call the special UDP hook. */
7171 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7172 	    sctp_recv_udp_tunneled_packet,
7173 	    sctp_recv_icmp_tunneled_packet,
7174 	    NULL))) {
7175 		sctp_over_udp_stop();
7176 		return (ret);
7177 	}
7178 	/* Ok, we have a socket, bind it to the port. */
7179 	memset(&sin, 0, sizeof(struct sockaddr_in));
7180 	sin.sin_len = sizeof(struct sockaddr_in);
7181 	sin.sin_family = AF_INET;
7182 	sin.sin_port = htons(port);
7183 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7184 	    (struct sockaddr *)&sin, curthread))) {
7185 		sctp_over_udp_stop();
7186 		return (ret);
7187 	}
7188 #endif
7189 #ifdef INET6
7190 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7191 	    SOCK_DGRAM, IPPROTO_UDP,
7192 	    curthread->td_ucred, curthread))) {
7193 		sctp_over_udp_stop();
7194 		return (ret);
7195 	}
7196 	/* Call the special UDP hook. */
7197 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7198 	    sctp_recv_udp_tunneled_packet,
7199 	    sctp_recv_icmp6_tunneled_packet,
7200 	    NULL))) {
7201 		sctp_over_udp_stop();
7202 		return (ret);
7203 	}
7204 	/* Ok, we have a socket, bind it to the port. */
7205 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7206 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7207 	sin6.sin6_family = AF_INET6;
7208 	sin6.sin6_port = htons(port);
7209 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7210 	    (struct sockaddr *)&sin6, curthread))) {
7211 		sctp_over_udp_stop();
7212 		return (ret);
7213 	}
7214 #endif
7215 	return (0);
7216 }
7217 
7218 /*
7219  * sctp_min_mtu() returns the minimum of all non-zero arguments.
7220  * If all arguments are zero, zero is returned.
7221  */
7222 uint32_t
7223 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
7224 {
7225 	if (mtu1 > 0) {
7226 		if (mtu2 > 0) {
7227 			if (mtu3 > 0) {
7228 				return (min(mtu1, min(mtu2, mtu3)));
7229 			} else {
7230 				return (min(mtu1, mtu2));
7231 			}
7232 		} else {
7233 			if (mtu3 > 0) {
7234 				return (min(mtu1, mtu3));
7235 			} else {
7236 				return (mtu1);
7237 			}
7238 		}
7239 	} else {
7240 		if (mtu2 > 0) {
7241 			if (mtu3 > 0) {
7242 				return (min(mtu2, mtu3));
7243 			} else {
7244 				return (mtu2);
7245 			}
7246 		} else {
7247 			return (mtu3);
7248 		}
7249 	}
7250 }
7251 
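/*
 * Record the path MTU for the given peer address and FIB in the TCP
 * host cache.
 */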
7252 void
7253 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
7254 {
7255 	struct in_conninfo inc;
7256 
7257 	memset(&inc, 0, sizeof(struct in_conninfo));
7258 	inc.inc_fibnum = fibnum;
7259 	switch (addr->sa.sa_family) {
7260 #ifdef INET
7261 	case AF_INET:
7262 		inc.inc_faddr = addr->sin.sin_addr;
7263 		break;
7264 #endif
7265 #ifdef INET6
7266 	case AF_INET6:
7267 		inc.inc_flags |= INC_ISIPV6;
7268 		inc.inc6_faddr = addr->sin6.sin6_addr;
7269 		break;
7270 #endif
7271 	default:
7272 		return;
7273 	}
7274 	tcp_hc_updatemtu(&inc, (u_long)mtu);
7275 }
7276 
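/*
 * Look up the path MTU cached for the given peer address and FIB in the
 * TCP host cache.  Returns 0 if there is no cached value.
 */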
7277 uint32_t
7278 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
7279 {
7280 	struct in_conninfo inc;
7281 
7282 	memset(&inc, 0, sizeof(struct in_conninfo));
7283 	inc.inc_fibnum = fibnum;
7284 	switch (addr->sa.sa_family) {
7285 #ifdef INET
7286 	case AF_INET:
7287 		inc.inc_faddr = addr->sin.sin_addr;
7288 		break;
7289 #endif
7290 #ifdef INET6
7291 	case AF_INET6:
7292 		inc.inc_flags |= INC_ISIPV6;
7293 		inc.inc6_faddr = addr->sin6.sin6_addr;
7294 		break;
7295 #endif
7296 	default:
7297 		return (0);
7298 	}
7299 	return ((uint32_t)tcp_hc_getmtu(&inc));
7300 }
7301