xref: /freebsd/sys/netinet/sctputil.c (revision 488ab515d6cc02f6f743f0badfc8e94eb553cd30)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * a) Redistributions of source code must retain the above copyright notice,
12  *    this list of conditions and the following disclaimer.
13  *
14  * b) Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the distribution.
17  *
18  * c) Neither the name of Cisco Systems, Inc. nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #include <netinet6/sctp6_var.h>
45 #endif
46 #include <netinet/sctp_header.h>
47 #include <netinet/sctp_output.h>
48 #include <netinet/sctp_uio.h>
49 #include <netinet/sctp_timer.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_auth.h>
52 #include <netinet/sctp_asconf.h>
53 #include <netinet/sctp_bsd_addr.h>
54 #if defined(INET6) || defined(INET)
55 #include <netinet/tcp_var.h>
56 #endif
57 #include <netinet/udp.h>
58 #include <netinet/udp_var.h>
59 #include <sys/proc.h>
60 #ifdef INET6
61 #include <netinet/icmp6.h>
62 #endif
63 
64 
65 #ifndef KTR_SCTP
66 #define KTR_SCTP KTR_SUBSYS
67 #endif
68 
69 extern const struct sctp_cc_functions sctp_cc_functions[];
70 extern const struct sctp_ss_functions sctp_ss_functions[];
71 
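/*
 * The sctp_*log*() helpers below record debug trace events through KTR
 * (SCTP_CTR6): each one packs its event-specific fields into a
 * struct sctp_cwnd_log and emits the x.misc.log1-log4 words.
 */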
72 void
73 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
74 {
75 	struct sctp_cwnd_log sctp_clog;
76 
77 	sctp_clog.x.sb.stcb = stcb;
78 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
79 	if (stcb)
80 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
81 	else
82 		sctp_clog.x.sb.stcb_sbcc = 0;
83 	sctp_clog.x.sb.incr = incr;
84 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
85 	    SCTP_LOG_EVENT_SB,
86 	    from,
87 	    sctp_clog.x.misc.log1,
88 	    sctp_clog.x.misc.log2,
89 	    sctp_clog.x.misc.log3,
90 	    sctp_clog.x.misc.log4);
91 }
92 
93 void
94 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
95 {
96 	struct sctp_cwnd_log sctp_clog;
97 
98 	sctp_clog.x.close.inp = (void *)inp;
99 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
100 	if (stcb) {
101 		sctp_clog.x.close.stcb = (void *)stcb;
102 		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
103 	} else {
104 		sctp_clog.x.close.stcb = 0;
105 		sctp_clog.x.close.state = 0;
106 	}
107 	sctp_clog.x.close.loc = loc;
108 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
109 	    SCTP_LOG_EVENT_CLOSE,
110 	    0,
111 	    sctp_clog.x.misc.log1,
112 	    sctp_clog.x.misc.log2,
113 	    sctp_clog.x.misc.log3,
114 	    sctp_clog.x.misc.log4);
115 }
116 
117 void
118 rto_logging(struct sctp_nets *net, int from)
119 {
120 	struct sctp_cwnd_log sctp_clog;
121 
122 	memset(&sctp_clog, 0, sizeof(sctp_clog));
123 	sctp_clog.x.rto.net = (void *)net;
124 	sctp_clog.x.rto.rtt = net->rtt / 1000;
125 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
126 	    SCTP_LOG_EVENT_RTT,
127 	    from,
128 	    sctp_clog.x.misc.log1,
129 	    sctp_clog.x.misc.log2,
130 	    sctp_clog.x.misc.log3,
131 	    sctp_clog.x.misc.log4);
132 }
133 
134 void
135 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
136 {
137 	struct sctp_cwnd_log sctp_clog;
138 
139 	sctp_clog.x.strlog.stcb = stcb;
140 	sctp_clog.x.strlog.n_tsn = tsn;
141 	sctp_clog.x.strlog.n_sseq = sseq;
142 	sctp_clog.x.strlog.e_tsn = 0;
143 	sctp_clog.x.strlog.e_sseq = 0;
144 	sctp_clog.x.strlog.strm = stream;
145 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
146 	    SCTP_LOG_EVENT_STRM,
147 	    from,
148 	    sctp_clog.x.misc.log1,
149 	    sctp_clog.x.misc.log2,
150 	    sctp_clog.x.misc.log3,
151 	    sctp_clog.x.misc.log4);
152 }
153 
154 void
155 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
156 {
157 	struct sctp_cwnd_log sctp_clog;
158 
159 	sctp_clog.x.nagle.stcb = (void *)stcb;
160 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
161 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
162 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
163 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
164 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
165 	    SCTP_LOG_EVENT_NAGLE,
166 	    action,
167 	    sctp_clog.x.misc.log1,
168 	    sctp_clog.x.misc.log2,
169 	    sctp_clog.x.misc.log3,
170 	    sctp_clog.x.misc.log4);
171 }
172 
173 void
174 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
175 {
176 	struct sctp_cwnd_log sctp_clog;
177 
178 	sctp_clog.x.sack.cumack = cumack;
179 	sctp_clog.x.sack.oldcumack = old_cumack;
180 	sctp_clog.x.sack.tsn = tsn;
181 	sctp_clog.x.sack.numGaps = gaps;
182 	sctp_clog.x.sack.numDups = dups;
183 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
184 	    SCTP_LOG_EVENT_SACK,
185 	    from,
186 	    sctp_clog.x.misc.log1,
187 	    sctp_clog.x.misc.log2,
188 	    sctp_clog.x.misc.log3,
189 	    sctp_clog.x.misc.log4);
190 }
191 
192 void
193 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
194 {
195 	struct sctp_cwnd_log sctp_clog;
196 
197 	memset(&sctp_clog, 0, sizeof(sctp_clog));
198 	sctp_clog.x.map.base = map;
199 	sctp_clog.x.map.cum = cum;
200 	sctp_clog.x.map.high = high;
201 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
202 	    SCTP_LOG_EVENT_MAP,
203 	    from,
204 	    sctp_clog.x.misc.log1,
205 	    sctp_clog.x.misc.log2,
206 	    sctp_clog.x.misc.log3,
207 	    sctp_clog.x.misc.log4);
208 }
209 
210 void
211 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
212 {
213 	struct sctp_cwnd_log sctp_clog;
214 
215 	memset(&sctp_clog, 0, sizeof(sctp_clog));
216 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
217 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
218 	sctp_clog.x.fr.tsn = tsn;
219 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
220 	    SCTP_LOG_EVENT_FR,
221 	    from,
222 	    sctp_clog.x.misc.log1,
223 	    sctp_clog.x.misc.log2,
224 	    sctp_clog.x.misc.log3,
225 	    sctp_clog.x.misc.log4);
226 }
227 
228 #ifdef SCTP_MBUF_LOGGING
229 void
230 sctp_log_mb(struct mbuf *m, int from)
231 {
232 	struct sctp_cwnd_log sctp_clog;
233 
234 	sctp_clog.x.mb.mp = m;
235 	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
236 	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
237 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
238 	if (SCTP_BUF_IS_EXTENDED(m)) {
239 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
240 		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
241 	} else {
242 		sctp_clog.x.mb.ext = 0;
243 		sctp_clog.x.mb.refcnt = 0;
244 	}
245 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
246 	    SCTP_LOG_EVENT_MBUF,
247 	    from,
248 	    sctp_clog.x.misc.log1,
249 	    sctp_clog.x.misc.log2,
250 	    sctp_clog.x.misc.log3,
251 	    sctp_clog.x.misc.log4);
252 }
253 
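/* Log every mbuf in the chain starting at m. */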
254 void
255 sctp_log_mbc(struct mbuf *m, int from)
256 {
257 	struct mbuf *mat;
258 
259 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
260 		sctp_log_mb(mat, from);
261 	}
262 }
263 #endif
264 
265 void
266 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
267 {
268 	struct sctp_cwnd_log sctp_clog;
269 
270 	if (control == NULL) {
271 		SCTP_PRINTF("Gak log of NULL?\n");
272 		return;
273 	}
274 	sctp_clog.x.strlog.stcb = control->stcb;
275 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
276 	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
277 	sctp_clog.x.strlog.strm = control->sinfo_stream;
278 	if (poschk != NULL) {
279 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
280 		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
281 	} else {
282 		sctp_clog.x.strlog.e_tsn = 0;
283 		sctp_clog.x.strlog.e_sseq = 0;
284 	}
285 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
286 	    SCTP_LOG_EVENT_STRM,
287 	    from,
288 	    sctp_clog.x.misc.log1,
289 	    sctp_clog.x.misc.log2,
290 	    sctp_clog.x.misc.log3,
291 	    sctp_clog.x.misc.log4);
292 }
293 
294 void
295 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
296 {
297 	struct sctp_cwnd_log sctp_clog;
298 
299 	sctp_clog.x.cwnd.net = net;
300 	if (stcb->asoc.send_queue_cnt > 255)
301 		sctp_clog.x.cwnd.cnt_in_send = 255;
302 	else
303 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
304 	if (stcb->asoc.stream_queue_cnt > 255)
305 		sctp_clog.x.cwnd.cnt_in_str = 255;
306 	else
307 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
308 
309 	if (net) {
310 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
311 		sctp_clog.x.cwnd.inflight = net->flight_size;
312 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
313 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
314 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
315 	}
316 	if (SCTP_CWNDLOG_PRESEND == from) {
317 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
318 	}
319 	sctp_clog.x.cwnd.cwnd_augment = augment;
320 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
321 	    SCTP_LOG_EVENT_CWND,
322 	    from,
323 	    sctp_clog.x.misc.log1,
324 	    sctp_clog.x.misc.log2,
325 	    sctp_clog.x.misc.log3,
326 	    sctp_clog.x.misc.log4);
327 }
328 
329 void
330 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
331 {
332 	struct sctp_cwnd_log sctp_clog;
333 
334 	memset(&sctp_clog, 0, sizeof(sctp_clog));
335 	if (inp) {
336 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
337 
338 	} else {
339 		sctp_clog.x.lock.sock = (void *)NULL;
340 	}
341 	sctp_clog.x.lock.inp = (void *)inp;
342 	if (stcb) {
343 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
344 	} else {
345 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
346 	}
347 	if (inp) {
348 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
349 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
350 	} else {
351 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
352 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
353 	}
354 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
355 	if (inp && (inp->sctp_socket)) {
356 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
357 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
358 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
359 	} else {
360 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
361 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
362 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
363 	}
364 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
365 	    SCTP_LOG_LOCK_EVENT,
366 	    from,
367 	    sctp_clog.x.misc.log1,
368 	    sctp_clog.x.misc.log2,
369 	    sctp_clog.x.misc.log3,
370 	    sctp_clog.x.misc.log4);
371 }
372 
373 void
374 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
375 {
376 	struct sctp_cwnd_log sctp_clog;
377 
378 	memset(&sctp_clog, 0, sizeof(sctp_clog));
379 	sctp_clog.x.cwnd.net = net;
380 	sctp_clog.x.cwnd.cwnd_new_value = error;
381 	sctp_clog.x.cwnd.inflight = net->flight_size;
382 	sctp_clog.x.cwnd.cwnd_augment = burst;
383 	if (stcb->asoc.send_queue_cnt > 255)
384 		sctp_clog.x.cwnd.cnt_in_send = 255;
385 	else
386 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
387 	if (stcb->asoc.stream_queue_cnt > 255)
388 		sctp_clog.x.cwnd.cnt_in_str = 255;
389 	else
390 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
391 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
392 	    SCTP_LOG_EVENT_MAXBURST,
393 	    from,
394 	    sctp_clog.x.misc.log1,
395 	    sctp_clog.x.misc.log2,
396 	    sctp_clog.x.misc.log3,
397 	    sctp_clog.x.misc.log4);
398 }
399 
400 void
401 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
402 {
403 	struct sctp_cwnd_log sctp_clog;
404 
405 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
406 	sctp_clog.x.rwnd.send_size = snd_size;
407 	sctp_clog.x.rwnd.overhead = overhead;
408 	sctp_clog.x.rwnd.new_rwnd = 0;
409 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
410 	    SCTP_LOG_EVENT_RWND,
411 	    from,
412 	    sctp_clog.x.misc.log1,
413 	    sctp_clog.x.misc.log2,
414 	    sctp_clog.x.misc.log3,
415 	    sctp_clog.x.misc.log4);
416 }
417 
418 void
419 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
420 {
421 	struct sctp_cwnd_log sctp_clog;
422 
423 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
424 	sctp_clog.x.rwnd.send_size = flight_size;
425 	sctp_clog.x.rwnd.overhead = overhead;
426 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
427 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
428 	    SCTP_LOG_EVENT_RWND,
429 	    from,
430 	    sctp_clog.x.misc.log1,
431 	    sctp_clog.x.misc.log2,
432 	    sctp_clog.x.misc.log3,
433 	    sctp_clog.x.misc.log4);
434 }
435 
436 #ifdef SCTP_MBCNT_LOGGING
437 static void
438 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
439 {
440 	struct sctp_cwnd_log sctp_clog;
441 
442 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
443 	sctp_clog.x.mbcnt.size_change = book;
444 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
445 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
446 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
447 	    SCTP_LOG_EVENT_MBCNT,
448 	    from,
449 	    sctp_clog.x.misc.log1,
450 	    sctp_clog.x.misc.log2,
451 	    sctp_clog.x.misc.log3,
452 	    sctp_clog.x.misc.log4);
453 }
454 #endif
455 
456 void
457 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
458 {
459 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
460 	    SCTP_LOG_MISC_EVENT,
461 	    from,
462 	    a, b, c, d);
463 }
464 
465 void
466 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
467 {
468 	struct sctp_cwnd_log sctp_clog;
469 
470 	sctp_clog.x.wake.stcb = (void *)stcb;
471 	sctp_clog.x.wake.wake_cnt = wake_cnt;
472 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
473 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
474 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
475 
476 	if (stcb->asoc.stream_queue_cnt < 0xff)
477 		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
478 	else
479 		sctp_clog.x.wake.stream_qcnt = 0xff;
480 
481 	if (stcb->asoc.chunks_on_out_queue < 0xff)
482 		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
483 	else
484 		sctp_clog.x.wake.chunks_on_oque = 0xff;
485 
486 	sctp_clog.x.wake.sctpflags = 0;
487 	/* set in the deferred mode stuff */
488 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
489 		sctp_clog.x.wake.sctpflags |= 1;
490 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
491 		sctp_clog.x.wake.sctpflags |= 2;
492 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
493 		sctp_clog.x.wake.sctpflags |= 4;
494 	/* what about the sb */
495 	if (stcb->sctp_socket) {
496 		struct socket *so = stcb->sctp_socket;
497 
498 		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
499 	} else {
500 		sctp_clog.x.wake.sbflags = 0xff;
501 	}
502 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
503 	    SCTP_LOG_EVENT_WAKE,
504 	    from,
505 	    sctp_clog.x.misc.log1,
506 	    sctp_clog.x.misc.log2,
507 	    sctp_clog.x.misc.log3,
508 	    sctp_clog.x.misc.log4);
509 }
510 
511 void
512 sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
513 {
514 	struct sctp_cwnd_log sctp_clog;
515 
516 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
517 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
518 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
519 	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
520 	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
521 	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
522 	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
523 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
524 	    SCTP_LOG_EVENT_BLOCK,
525 	    from,
526 	    sctp_clog.x.misc.log1,
527 	    sctp_clog.x.misc.log2,
528 	    sctp_clog.x.misc.log3,
529 	    sctp_clog.x.misc.log4);
530 }
531 
532 int
533 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
534 {
535 	/* May need to fix this if ktrdump does not work */
536 	return (0);
537 }
538 
539 #ifdef SCTP_AUDITING_ENABLED
540 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
541 static int sctp_audit_indx = 0;
542 
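
/*
 * Dump the circular audit buffer, starting at the current index and
 * wrapping around to the beginning.
 */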
543 static
544 void
545 sctp_print_audit_report(void)
546 {
547 	int i;
548 	int cnt;
549 
550 	cnt = 0;
551 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
552 		if ((sctp_audit_data[i][0] == 0xe0) &&
553 		    (sctp_audit_data[i][1] == 0x01)) {
554 			cnt = 0;
555 			SCTP_PRINTF("\n");
556 		} else if (sctp_audit_data[i][0] == 0xf0) {
557 			cnt = 0;
558 			SCTP_PRINTF("\n");
559 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
560 		    (sctp_audit_data[i][1] == 0x01)) {
561 			SCTP_PRINTF("\n");
562 			cnt = 0;
563 		}
564 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
565 		    (uint32_t)sctp_audit_data[i][1]);
566 		cnt++;
567 		if ((cnt % 14) == 0)
568 			SCTP_PRINTF("\n");
569 	}
570 	for (i = 0; i < sctp_audit_indx; i++) {
571 		if ((sctp_audit_data[i][0] == 0xe0) &&
572 		    (sctp_audit_data[i][1] == 0x01)) {
573 			cnt = 0;
574 			SCTP_PRINTF("\n");
575 		} else if (sctp_audit_data[i][0] == 0xf0) {
576 			cnt = 0;
577 			SCTP_PRINTF("\n");
578 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
579 		    (sctp_audit_data[i][1] == 0x01)) {
580 			SCTP_PRINTF("\n");
581 			cnt = 0;
582 		}
583 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
584 		    (uint32_t)sctp_audit_data[i][1]);
585 		cnt++;
586 		if ((cnt % 14) == 0)
587 			SCTP_PRINTF("\n");
588 	}
589 	SCTP_PRINTF("\n");
590 }
591 
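/*
 * Consistency check: recompute the retransmit count, the total flight
 * size and the per-net flight sizes from the sent queue, correct any
 * mismatches and print an audit report if one was found.
 */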
592 void
593 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
594     struct sctp_nets *net)
595 {
596 	int resend_cnt, tot_out, rep, tot_book_cnt;
597 	struct sctp_nets *lnet;
598 	struct sctp_tmit_chunk *chk;
599 
600 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
601 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
602 	sctp_audit_indx++;
603 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
604 		sctp_audit_indx = 0;
605 	}
606 	if (inp == NULL) {
607 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
608 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
609 		sctp_audit_indx++;
610 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
611 			sctp_audit_indx = 0;
612 		}
613 		return;
614 	}
615 	if (stcb == NULL) {
616 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
617 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
618 		sctp_audit_indx++;
619 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
620 			sctp_audit_indx = 0;
621 		}
622 		return;
623 	}
624 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
625 	sctp_audit_data[sctp_audit_indx][1] =
626 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
627 	sctp_audit_indx++;
628 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
629 		sctp_audit_indx = 0;
630 	}
631 	rep = 0;
632 	tot_book_cnt = 0;
633 	resend_cnt = tot_out = 0;
634 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
635 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
636 			resend_cnt++;
637 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
638 			tot_out += chk->book_size;
639 			tot_book_cnt++;
640 		}
641 	}
642 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
643 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
644 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
645 		sctp_audit_indx++;
646 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
647 			sctp_audit_indx = 0;
648 		}
649 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
650 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
651 		rep = 1;
652 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
653 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
654 		sctp_audit_data[sctp_audit_indx][1] =
655 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
656 		sctp_audit_indx++;
657 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
658 			sctp_audit_indx = 0;
659 		}
660 	}
661 	if (tot_out != stcb->asoc.total_flight) {
662 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
663 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
664 		sctp_audit_indx++;
665 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
666 			sctp_audit_indx = 0;
667 		}
668 		rep = 1;
669 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
670 		    (int)stcb->asoc.total_flight);
671 		stcb->asoc.total_flight = tot_out;
672 	}
673 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
674 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
675 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
676 		sctp_audit_indx++;
677 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
678 			sctp_audit_indx = 0;
679 		}
680 		rep = 1;
681 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
682 
683 		stcb->asoc.total_flight_count = tot_book_cnt;
684 	}
685 	tot_out = 0;
686 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
687 		tot_out += lnet->flight_size;
688 	}
689 	if (tot_out != stcb->asoc.total_flight) {
690 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
691 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
692 		sctp_audit_indx++;
693 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
694 			sctp_audit_indx = 0;
695 		}
696 		rep = 1;
697 		SCTP_PRINTF("real flight:%d net total was %d\n",
698 		    stcb->asoc.total_flight, tot_out);
699 		/* now corrective action */
700 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
701 
702 			tot_out = 0;
703 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
704 				if ((chk->whoTo == lnet) &&
705 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
706 					tot_out += chk->book_size;
707 				}
708 			}
709 			if (lnet->flight_size != tot_out) {
710 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
711 				    (void *)lnet, lnet->flight_size,
712 				    tot_out);
713 				lnet->flight_size = tot_out;
714 			}
715 		}
716 	}
717 	if (rep) {
718 		sctp_print_audit_report();
719 	}
720 }
721 
722 void
723 sctp_audit_log(uint8_t ev, uint8_t fd)
724 {
725 
726 	sctp_audit_data[sctp_audit_indx][0] = ev;
727 	sctp_audit_data[sctp_audit_indx][1] = fd;
728 	sctp_audit_indx++;
729 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
730 		sctp_audit_indx = 0;
731 	}
732 }
733 
734 #endif
735 
736 /*
737  * sctp_stop_timers_for_shutdown() should be called
738  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
739  * state to make sure that all timers are stopped.
740  */
741 void
742 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
743 {
744 	struct sctp_association *asoc;
745 	struct sctp_nets *net;
746 
747 	asoc = &stcb->asoc;
748 
749 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
750 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
751 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
752 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
753 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
754 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
755 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
756 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
757 	}
758 }
759 
760 /*
761  * A list of sizes based on typical MTUs, used only if the next hop size is
762  * not returned.
763  */
764 static uint32_t sctp_mtu_sizes[] = {
765 	68,
766 	296,
767 	508,
768 	512,
769 	544,
770 	576,
771 	1006,
772 	1492,
773 	1500,
774 	1536,
775 	2002,
776 	2048,
777 	4352,
778 	4464,
779 	8166,
780 	17914,
781 	32000,
782 	65535
783 };
784 
785 /*
786  * Return the largest MTU smaller than val. If there is no
787  * entry, just return val.
788  */
789 uint32_t
790 sctp_get_prev_mtu(uint32_t val)
791 {
792 	uint32_t i;
793 
794 	if (val <= sctp_mtu_sizes[0]) {
795 		return (val);
796 	}
797 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
798 		if (val <= sctp_mtu_sizes[i]) {
799 			break;
800 		}
801 	}
802 	return (sctp_mtu_sizes[i - 1]);
803 }
804 
805 /*
806  * Return the smallest MTU larger than val. If there is no
807  * entry, just return val.
808  */
809 uint32_t
810 sctp_get_next_mtu(uint32_t val)
811 {
812 	/* select another MTU that is just bigger than this one */
813 	uint32_t i;
814 
815 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
816 		if (val < sctp_mtu_sizes[i]) {
817 			return (sctp_mtu_sizes[i]);
818 		}
819 	}
820 	return (val);
821 }
822 
823 void
824 sctp_fill_random_store(struct sctp_pcb *m)
825 {
826 	/*
827 	 * Here we use MD5/SHA-1 to hash our good random numbers and our
828 	 * counter. The result becomes our new good random numbers, and we
829 	 * then set up to hand these out. Note that we do no locking to
830 	 * protect this. That is OK, since if competing callers run this we
831 	 * will just get more gobbledygook in the random store, which is
832 	 * what we want. There is a danger that two callers will use the
833 	 * same random numbers, but that's OK too since that is random as well :->
834 	 */
835 	m->store_at = 0;
836 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
837 	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
838 	    sizeof(m->random_counter), (uint8_t *)m->random_store);
839 	m->random_counter++;
840 }
841 
842 uint32_t
843 sctp_select_initial_TSN(struct sctp_pcb *inp)
844 {
845 	/*
846 	 * A true implementation should use a random selection process to
847 	 * get the initial TSN, using RFC 1750 as a good guideline for the
848 	 * random number generation.
849 	 */
850 	uint32_t x, *xp;
851 	uint8_t *p;
852 	int store_at, new_store;
853 
854 	if (inp->initial_sequence_debug != 0) {
855 		uint32_t ret;
856 
857 		ret = inp->initial_sequence_debug;
858 		inp->initial_sequence_debug++;
859 		return (ret);
860 	}
861 retry:
862 	store_at = inp->store_at;
863 	new_store = store_at + sizeof(uint32_t);
864 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
865 		new_store = 0;
866 	}
867 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
868 		goto retry;
869 	}
870 	if (new_store == 0) {
871 		/* Refill the random store */
872 		sctp_fill_random_store(inp);
873 	}
874 	p = &inp->random_store[store_at];
875 	xp = (uint32_t *)p;
876 	x = *xp;
877 	return (x);
878 }
879 
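/*
 * Select a verification tag. Zero is never returned; when 'check' is
 * set, candidate tags are also validated with sctp_is_vtag_good()
 * before being accepted.
 */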
880 uint32_t
881 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
882 {
883 	uint32_t x;
884 	struct timeval now;
885 
886 	if (check) {
887 		(void)SCTP_GETTIME_TIMEVAL(&now);
888 	}
889 	for (;;) {
890 		x = sctp_select_initial_TSN(&inp->sctp_ep);
891 		if (x == 0) {
892 			/* we never use 0 */
893 			continue;
894 		}
895 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
896 			break;
897 		}
898 	}
899 	return (x);
900 }
901 
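/*
 * Map the kernel association state bits onto the SCTP_* states that
 * are exposed to user space.
 */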
902 int32_t
903 sctp_map_assoc_state(int kernel_state)
904 {
905 	int32_t user_state;
906 
907 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
908 		user_state = SCTP_CLOSED;
909 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
910 		user_state = SCTP_SHUTDOWN_PENDING;
911 	} else {
912 		switch (kernel_state & SCTP_STATE_MASK) {
913 		case SCTP_STATE_EMPTY:
914 			user_state = SCTP_CLOSED;
915 			break;
916 		case SCTP_STATE_INUSE:
917 			user_state = SCTP_CLOSED;
918 			break;
919 		case SCTP_STATE_COOKIE_WAIT:
920 			user_state = SCTP_COOKIE_WAIT;
921 			break;
922 		case SCTP_STATE_COOKIE_ECHOED:
923 			user_state = SCTP_COOKIE_ECHOED;
924 			break;
925 		case SCTP_STATE_OPEN:
926 			user_state = SCTP_ESTABLISHED;
927 			break;
928 		case SCTP_STATE_SHUTDOWN_SENT:
929 			user_state = SCTP_SHUTDOWN_SENT;
930 			break;
931 		case SCTP_STATE_SHUTDOWN_RECEIVED:
932 			user_state = SCTP_SHUTDOWN_RECEIVED;
933 			break;
934 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
935 			user_state = SCTP_SHUTDOWN_ACK_SENT;
936 			break;
937 		default:
938 			user_state = SCTP_CLOSED;
939 			break;
940 		}
941 	}
942 	return (user_state);
943 }
944 
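/*
 * Initialize a newly allocated association: inherit the defaults from
 * the endpoint, select the initial TSN and verification tags, and
 * allocate the outgoing stream and mapping arrays.
 */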
945 int
946 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
947     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
948 {
949 	struct sctp_association *asoc;
950 
951 	/*
952 	 * Anything set to zero is taken care of by the allocation routine's
953 	 * bzero
954 	 */
955 
956 	/*
957 	 * Up front, select what scoping to apply to the addresses I tell my
958 	 * peer. Not sure what to do with these right now; we will need to
959 	 * come up with a way to set them. We may need to pass them through
960 	 * from the caller in the sctp_aloc_assoc() function.
961 	 */
962 	int i;
963 #if defined(SCTP_DETAILED_STR_STATS)
964 	int j;
965 #endif
966 
967 	asoc = &stcb->asoc;
968 	/* init all variables to a known value. */
969 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
970 	asoc->max_burst = inp->sctp_ep.max_burst;
971 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
972 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
973 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
974 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
975 	asoc->ecn_supported = inp->ecn_supported;
976 	asoc->prsctp_supported = inp->prsctp_supported;
977 	asoc->idata_supported = inp->idata_supported;
978 	asoc->auth_supported = inp->auth_supported;
979 	asoc->asconf_supported = inp->asconf_supported;
980 	asoc->reconfig_supported = inp->reconfig_supported;
981 	asoc->nrsack_supported = inp->nrsack_supported;
982 	asoc->pktdrop_supported = inp->pktdrop_supported;
983 	asoc->idata_supported = inp->idata_supported;
984 	asoc->sctp_cmt_pf = (uint8_t)0;
985 	asoc->sctp_frag_point = inp->sctp_frag_point;
986 	asoc->sctp_features = inp->sctp_features;
987 	asoc->default_dscp = inp->sctp_ep.default_dscp;
988 	asoc->max_cwnd = inp->max_cwnd;
989 #ifdef INET6
990 	if (inp->sctp_ep.default_flowlabel) {
991 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
992 	} else {
993 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
994 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
995 			asoc->default_flowlabel &= 0x000fffff;
996 			asoc->default_flowlabel |= 0x80000000;
997 		} else {
998 			asoc->default_flowlabel = 0;
999 		}
1000 	}
1001 #endif
1002 	asoc->sb_send_resv = 0;
1003 	if (override_tag) {
1004 		asoc->my_vtag = override_tag;
1005 	} else {
1006 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1007 	}
1008 	/* Get the nonce tags */
1009 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1010 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1011 	asoc->vrf_id = vrf_id;
1012 
1013 #ifdef SCTP_ASOCLOG_OF_TSNS
1014 	asoc->tsn_in_at = 0;
1015 	asoc->tsn_out_at = 0;
1016 	asoc->tsn_in_wrapped = 0;
1017 	asoc->tsn_out_wrapped = 0;
1018 	asoc->cumack_log_at = 0;
1019 	asoc->cumack_log_atsnt = 0;
1020 #endif
1021 #ifdef SCTP_FS_SPEC_LOG
1022 	asoc->fs_index = 0;
1023 #endif
1024 	asoc->refcnt = 0;
1025 	asoc->assoc_up_sent = 0;
1026 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1027 	    sctp_select_initial_TSN(&inp->sctp_ep);
1028 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1029 	/* we are optimistic here */
1030 	asoc->peer_supports_nat = 0;
1031 	asoc->sent_queue_retran_cnt = 0;
1032 
1033 	/* for CMT */
1034 	asoc->last_net_cmt_send_started = NULL;
1035 
1036 	/* This will need to be adjusted */
1037 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1038 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1039 	asoc->asconf_seq_in = asoc->last_acked_seq;
1040 
1041 	/* here we are different, we hold the next one we expect */
1042 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1043 
1044 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1045 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1046 
1047 	asoc->default_mtu = inp->sctp_ep.default_mtu;
1048 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1049 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1050 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1051 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1052 	asoc->free_chunk_cnt = 0;
1053 
1054 	asoc->iam_blocking = 0;
1055 	asoc->context = inp->sctp_context;
1056 	asoc->local_strreset_support = inp->local_strreset_support;
1057 	asoc->def_send = inp->def_send;
1058 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1059 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1060 	asoc->pr_sctp_cnt = 0;
1061 	asoc->total_output_queue_size = 0;
1062 
1063 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1064 		asoc->scope.ipv6_addr_legal = 1;
1065 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1066 			asoc->scope.ipv4_addr_legal = 1;
1067 		} else {
1068 			asoc->scope.ipv4_addr_legal = 0;
1069 		}
1070 	} else {
1071 		asoc->scope.ipv6_addr_legal = 0;
1072 		asoc->scope.ipv4_addr_legal = 1;
1073 	}
1074 
1075 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1076 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1077 
1078 	asoc->smallest_mtu = inp->sctp_frag_point;
1079 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1080 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1081 
1082 	asoc->stream_locked_on = 0;
1083 	asoc->ecn_echo_cnt_onq = 0;
1084 	asoc->stream_locked = 0;
1085 
1086 	asoc->send_sack = 1;
1087 
1088 	LIST_INIT(&asoc->sctp_restricted_addrs);
1089 
1090 	TAILQ_INIT(&asoc->nets);
1091 	TAILQ_INIT(&asoc->pending_reply_queue);
1092 	TAILQ_INIT(&asoc->asconf_ack_sent);
1093 	/* Setup to fill the hb random cache at first HB */
1094 	asoc->hb_random_idx = 4;
1095 
1096 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1097 
1098 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1099 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1100 
1101 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1102 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1103 
1104 	/*
1105 	 * Now the stream parameters; here we allocate space for all streams
1106 	 * that we request by default.
1107 	 */
1108 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1109 	    o_strms;
1110 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1111 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1112 	    SCTP_M_STRMO);
1113 	if (asoc->strmout == NULL) {
1114 		/* big trouble no memory */
1115 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1116 		return (ENOMEM);
1117 	}
1118 	for (i = 0; i < asoc->streamoutcnt; i++) {
1119 		/*
1120 		 * The inbound side must be set to 0xffff. Also NOTE that when
1121 		 * we get the INIT-ACK back (for the INIT sender) we MUST
1122 		 * reduce the count (streamoutcnt), but first check whether we
1123 		 * sent on any of the upper streams that were dropped (if some
1124 		 * were). Those that were dropped must be reported to the
1125 		 * upper layer as failed to send.
1126 		 */
1127 		asoc->strmout[i].next_mid_ordered = 0;
1128 		asoc->strmout[i].next_mid_unordered = 0;
1129 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1130 		asoc->strmout[i].chunks_on_queues = 0;
1131 #if defined(SCTP_DETAILED_STR_STATS)
1132 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1133 			asoc->strmout[i].abandoned_sent[j] = 0;
1134 			asoc->strmout[i].abandoned_unsent[j] = 0;
1135 		}
1136 #else
1137 		asoc->strmout[i].abandoned_sent[0] = 0;
1138 		asoc->strmout[i].abandoned_unsent[0] = 0;
1139 #endif
1140 		asoc->strmout[i].sid = i;
1141 		asoc->strmout[i].last_msg_incomplete = 0;
1142 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1143 		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1144 	}
1145 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1146 
1147 	/* Now the mapping array */
1148 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1149 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1150 	    SCTP_M_MAP);
1151 	if (asoc->mapping_array == NULL) {
1152 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1153 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1154 		return (ENOMEM);
1155 	}
1156 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1157 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1158 	    SCTP_M_MAP);
1159 	if (asoc->nr_mapping_array == NULL) {
1160 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1161 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1162 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1163 		return (ENOMEM);
1164 	}
1165 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1166 
1167 	/* Now the init of the other outqueues */
1168 	TAILQ_INIT(&asoc->free_chunks);
1169 	TAILQ_INIT(&asoc->control_send_queue);
1170 	TAILQ_INIT(&asoc->asconf_send_queue);
1171 	TAILQ_INIT(&asoc->send_queue);
1172 	TAILQ_INIT(&asoc->sent_queue);
1173 	TAILQ_INIT(&asoc->resetHead);
1174 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1175 	TAILQ_INIT(&asoc->asconf_queue);
1176 	/* authentication fields */
1177 	asoc->authinfo.random = NULL;
1178 	asoc->authinfo.active_keyid = 0;
1179 	asoc->authinfo.assoc_key = NULL;
1180 	asoc->authinfo.assoc_keyid = 0;
1181 	asoc->authinfo.recv_key = NULL;
1182 	asoc->authinfo.recv_keyid = 0;
1183 	LIST_INIT(&asoc->shared_keys);
1184 	asoc->marked_retrans = 0;
1185 	asoc->port = inp->sctp_ep.port;
1186 	asoc->timoinit = 0;
1187 	asoc->timodata = 0;
1188 	asoc->timosack = 0;
1189 	asoc->timoshutdown = 0;
1190 	asoc->timoheartbeat = 0;
1191 	asoc->timocookie = 0;
1192 	asoc->timoshutdownack = 0;
1193 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1194 	asoc->discontinuity_time = asoc->start_time;
1195 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1196 		asoc->abandoned_unsent[i] = 0;
1197 		asoc->abandoned_sent[i] = 0;
1198 	}
1199 	/*
1200 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1201 	 * freed later when the association is freed.}
1202 	 */
1203 	return (0);
1204 }
1205 
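/*
 * Print the renegable and non-renegable mapping arrays for debugging,
 * omitting trailing all-zero entries.
 */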
1206 void
1207 sctp_print_mapping_array(struct sctp_association *asoc)
1208 {
1209 	unsigned int i, limit;
1210 
1211 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1212 	    asoc->mapping_array_size,
1213 	    asoc->mapping_array_base_tsn,
1214 	    asoc->cumulative_tsn,
1215 	    asoc->highest_tsn_inside_map,
1216 	    asoc->highest_tsn_inside_nr_map);
1217 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1218 		if (asoc->mapping_array[limit - 1] != 0) {
1219 			break;
1220 		}
1221 	}
1222 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1223 	for (i = 0; i < limit; i++) {
1224 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1225 	}
1226 	if (limit % 16)
1227 		SCTP_PRINTF("\n");
1228 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1229 		if (asoc->nr_mapping_array[limit - 1]) {
1230 			break;
1231 		}
1232 	}
1233 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1234 	for (i = 0; i < limit; i++) {
1235 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1236 	}
1237 	if (limit % 16)
1238 		SCTP_PRINTF("\n");
1239 }
1240 
1241 int
1242 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1243 {
1244 	/* mapping array needs to grow */
1245 	uint8_t *new_array1, *new_array2;
1246 	uint32_t new_size;
1247 
1248 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1249 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1250 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1251 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1252 		/* can't get more, forget it */
1253 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1254 		if (new_array1) {
1255 			SCTP_FREE(new_array1, SCTP_M_MAP);
1256 		}
1257 		if (new_array2) {
1258 			SCTP_FREE(new_array2, SCTP_M_MAP);
1259 		}
1260 		return (-1);
1261 	}
1262 	memset(new_array1, 0, new_size);
1263 	memset(new_array2, 0, new_size);
1264 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1265 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1266 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1267 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1268 	asoc->mapping_array = new_array1;
1269 	asoc->nr_mapping_array = new_array2;
1270 	asoc->mapping_array_size = new_size;
1271 	return (0);
1272 }
1273 
1274 
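/*
 * Walk the endpoints (and their associations) that match the
 * iterator's PCB flag, feature and association state filters, calling
 * the registered callbacks for each one. The locks are dropped and
 * re-acquired periodically so that long walks do not starve other
 * threads.
 */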
1275 static void
1276 sctp_iterator_work(struct sctp_iterator *it)
1277 {
1278 	int iteration_count = 0;
1279 	int inp_skip = 0;
1280 	int first_in = 1;
1281 	struct sctp_inpcb *tinp;
1282 
1283 	SCTP_INP_INFO_RLOCK();
1284 	SCTP_ITERATOR_LOCK();
1285 	sctp_it_ctl.cur_it = it;
1286 	if (it->inp) {
1287 		SCTP_INP_RLOCK(it->inp);
1288 		SCTP_INP_DECR_REF(it->inp);
1289 	}
1290 	if (it->inp == NULL) {
1291 		/* iterator is complete */
1292 done_with_iterator:
1293 		sctp_it_ctl.cur_it = NULL;
1294 		SCTP_ITERATOR_UNLOCK();
1295 		SCTP_INP_INFO_RUNLOCK();
1296 		if (it->function_atend != NULL) {
1297 			(*it->function_atend) (it->pointer, it->val);
1298 		}
1299 		SCTP_FREE(it, SCTP_M_ITER);
1300 		return;
1301 	}
1302 select_a_new_ep:
1303 	if (first_in) {
1304 		first_in = 0;
1305 	} else {
1306 		SCTP_INP_RLOCK(it->inp);
1307 	}
1308 	while (((it->pcb_flags) &&
1309 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1310 	    ((it->pcb_features) &&
1311 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1312 		/* endpoint flags or features don't match, so keep looking */
1313 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1314 			SCTP_INP_RUNLOCK(it->inp);
1315 			goto done_with_iterator;
1316 		}
1317 		tinp = it->inp;
1318 		it->inp = LIST_NEXT(it->inp, sctp_list);
1319 		SCTP_INP_RUNLOCK(tinp);
1320 		if (it->inp == NULL) {
1321 			goto done_with_iterator;
1322 		}
1323 		SCTP_INP_RLOCK(it->inp);
1324 	}
1325 	/* now go through each assoc which is in the desired state */
1326 	if (it->done_current_ep == 0) {
1327 		if (it->function_inp != NULL)
1328 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1329 		it->done_current_ep = 1;
1330 	}
1331 	if (it->stcb == NULL) {
1332 		/* run the per instance function */
1333 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1334 	}
1335 	if ((inp_skip) || it->stcb == NULL) {
1336 		if (it->function_inp_end != NULL) {
1337 			inp_skip = (*it->function_inp_end) (it->inp,
1338 			    it->pointer,
1339 			    it->val);
1340 		}
1341 		SCTP_INP_RUNLOCK(it->inp);
1342 		goto no_stcb;
1343 	}
1344 	while (it->stcb) {
1345 		SCTP_TCB_LOCK(it->stcb);
1346 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1347 			/* not in the right state... keep looking */
1348 			SCTP_TCB_UNLOCK(it->stcb);
1349 			goto next_assoc;
1350 		}
1351 		/* see if we have hit the iterator loop limit */
1352 		iteration_count++;
1353 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1354 			/* Pause to let others grab the lock */
1355 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1356 			SCTP_TCB_UNLOCK(it->stcb);
1357 			SCTP_INP_INCR_REF(it->inp);
1358 			SCTP_INP_RUNLOCK(it->inp);
1359 			SCTP_ITERATOR_UNLOCK();
1360 			SCTP_INP_INFO_RUNLOCK();
1361 			SCTP_INP_INFO_RLOCK();
1362 			SCTP_ITERATOR_LOCK();
1363 			if (sctp_it_ctl.iterator_flags) {
1364 				/* We won't be staying here */
1365 				SCTP_INP_DECR_REF(it->inp);
1366 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1367 				if (sctp_it_ctl.iterator_flags &
1368 				    SCTP_ITERATOR_STOP_CUR_IT) {
1369 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1370 					goto done_with_iterator;
1371 				}
1372 				if (sctp_it_ctl.iterator_flags &
1373 				    SCTP_ITERATOR_STOP_CUR_INP) {
1374 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1375 					goto no_stcb;
1376 				}
1377 				/* If we reach here huh? */
1378 				SCTP_PRINTF("Unknown it ctl flag %x\n",
1379 				    sctp_it_ctl.iterator_flags);
1380 				sctp_it_ctl.iterator_flags = 0;
1381 			}
1382 			SCTP_INP_RLOCK(it->inp);
1383 			SCTP_INP_DECR_REF(it->inp);
1384 			SCTP_TCB_LOCK(it->stcb);
1385 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1386 			iteration_count = 0;
1387 		}
1388 		/* run function on this one */
1389 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1390 
1391 		/*
1392 		 * we lie here, it really needs to have its own type but
1393 		 * first I must verify that this won't affect things :-0
1394 		 */
1395 		if (it->no_chunk_output == 0)
1396 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1397 
1398 		SCTP_TCB_UNLOCK(it->stcb);
1399 next_assoc:
1400 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1401 		if (it->stcb == NULL) {
1402 			/* Run last function */
1403 			if (it->function_inp_end != NULL) {
1404 				inp_skip = (*it->function_inp_end) (it->inp,
1405 				    it->pointer,
1406 				    it->val);
1407 			}
1408 		}
1409 	}
1410 	SCTP_INP_RUNLOCK(it->inp);
1411 no_stcb:
1412 	/* done with all assocs on this endpoint, move on to next endpoint */
1413 	it->done_current_ep = 0;
1414 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1415 		it->inp = NULL;
1416 	} else {
1417 		it->inp = LIST_NEXT(it->inp, sctp_list);
1418 	}
1419 	if (it->inp == NULL) {
1420 		goto done_with_iterator;
1421 	}
1422 	goto select_a_new_ep;
1423 }
1424 
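/* Drain the iterator work queue, running each queued iterator in turn. */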
1425 void
1426 sctp_iterator_worker(void)
1427 {
1428 	struct sctp_iterator *it, *nit;
1429 
1430 	/* This function is called with the WQ lock in place */
1431 
1432 	sctp_it_ctl.iterator_running = 1;
1433 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1434 		/* now let's work on this one */
1435 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1436 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1437 		CURVNET_SET(it->vn);
1438 		sctp_iterator_work(it);
1439 		CURVNET_RESTORE();
1440 		SCTP_IPI_ITERATOR_WQ_LOCK();
1441 		/* sa_ignore FREED_MEMORY */
1442 	}
1443 	sctp_it_ctl.iterator_running = 0;
1444 	return;
1445 }
1446 
1447 
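/*
 * Drain the address work queue filled from the rtsock calls and hand
 * the collected addresses to an ASCONF iterator over all bound-all
 * endpoints.
 */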
1448 static void
1449 sctp_handle_addr_wq(void)
1450 {
1451 	/* deal with the ADDR wq from the rtsock calls */
1452 	struct sctp_laddr *wi, *nwi;
1453 	struct sctp_asconf_iterator *asc;
1454 
1455 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1456 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1457 	if (asc == NULL) {
1458 		/* Try later, no memory */
1459 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1460 		    (struct sctp_inpcb *)NULL,
1461 		    (struct sctp_tcb *)NULL,
1462 		    (struct sctp_nets *)NULL);
1463 		return;
1464 	}
1465 	LIST_INIT(&asc->list_of_work);
1466 	asc->cnt = 0;
1467 
1468 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1469 		LIST_REMOVE(wi, sctp_nxt_addr);
1470 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1471 		asc->cnt++;
1472 	}
1473 
1474 	if (asc->cnt == 0) {
1475 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1476 	} else {
1477 		int ret;
1478 
1479 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1480 		    sctp_asconf_iterator_stcb,
1481 		    NULL,	/* No ep end for boundall */
1482 		    SCTP_PCB_FLAGS_BOUNDALL,
1483 		    SCTP_PCB_ANY_FEATURES,
1484 		    SCTP_ASOC_ANY_STATE,
1485 		    (void *)asc, 0,
1486 		    sctp_asconf_iterator_end, NULL, 0);
1487 		if (ret) {
1488 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1489 			/*
1490 			 * Free it if we are stopping, or put it back on
1491 			 * the addr_wq.
1492 			 */
1493 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1494 				sctp_asconf_iterator_end(asc, 0);
1495 			} else {
1496 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1497 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1498 				}
1499 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1500 			}
1501 		}
1502 	}
1503 }
1504 
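/*
 * Common callout handler for all SCTP timer types. After validating
 * the timer and taking the required endpoint/association references
 * and locks, it dispatches on the timer type.
 */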
1505 void
1506 sctp_timeout_handler(void *t)
1507 {
1508 	struct sctp_inpcb *inp;
1509 	struct sctp_tcb *stcb;
1510 	struct sctp_nets *net;
1511 	struct sctp_timer *tmr;
1512 	struct mbuf *op_err;
1513 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1514 	struct socket *so;
1515 #endif
1516 	int did_output;
1517 	int type;
1518 
1519 	tmr = (struct sctp_timer *)t;
1520 	inp = (struct sctp_inpcb *)tmr->ep;
1521 	stcb = (struct sctp_tcb *)tmr->tcb;
1522 	net = (struct sctp_nets *)tmr->net;
1523 	CURVNET_SET((struct vnet *)tmr->vnet);
1524 	did_output = 1;
1525 
1526 #ifdef SCTP_AUDITING_ENABLED
1527 	sctp_audit_log(0xF0, (uint8_t)tmr->type);
1528 	sctp_auditing(3, inp, stcb, net);
1529 #endif
1530 
1531 	/* sanity checks... */
1532 	if (tmr->self != (void *)tmr) {
1533 		/*
1534 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1535 		 * (void *)tmr);
1536 		 */
1537 		CURVNET_RESTORE();
1538 		return;
1539 	}
1540 	tmr->stopped_from = 0xa001;
1541 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1542 		/*
1543 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1544 		 * tmr->type);
1545 		 */
1546 		CURVNET_RESTORE();
1547 		return;
1548 	}
1549 	tmr->stopped_from = 0xa002;
1550 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1551 		CURVNET_RESTORE();
1552 		return;
1553 	}
1554 	/* if this is an iterator timeout, get the struct and clear inp */
1555 	tmr->stopped_from = 0xa003;
1556 	if (inp) {
1557 		SCTP_INP_INCR_REF(inp);
1558 		if ((inp->sctp_socket == NULL) &&
1559 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1560 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1561 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1562 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1563 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1564 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1565 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1566 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1567 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))) {
1568 			SCTP_INP_DECR_REF(inp);
1569 			CURVNET_RESTORE();
1570 			return;
1571 		}
1572 	}
1573 	tmr->stopped_from = 0xa004;
1574 	if (stcb) {
1575 		atomic_add_int(&stcb->asoc.refcnt, 1);
1576 		if (stcb->asoc.state == 0) {
1577 			atomic_add_int(&stcb->asoc.refcnt, -1);
1578 			if (inp) {
1579 				SCTP_INP_DECR_REF(inp);
1580 			}
1581 			CURVNET_RESTORE();
1582 			return;
1583 		}
1584 	}
1585 	type = tmr->type;
1586 	tmr->stopped_from = 0xa005;
1587 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
1588 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1589 		if (inp) {
1590 			SCTP_INP_DECR_REF(inp);
1591 		}
1592 		if (stcb) {
1593 			atomic_add_int(&stcb->asoc.refcnt, -1);
1594 		}
1595 		CURVNET_RESTORE();
1596 		return;
1597 	}
1598 	tmr->stopped_from = 0xa006;
1599 
1600 	if (stcb) {
1601 		SCTP_TCB_LOCK(stcb);
1602 		atomic_add_int(&stcb->asoc.refcnt, -1);
1603 		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1604 		    ((stcb->asoc.state == 0) ||
1605 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1606 			SCTP_TCB_UNLOCK(stcb);
1607 			if (inp) {
1608 				SCTP_INP_DECR_REF(inp);
1609 			}
1610 			CURVNET_RESTORE();
1611 			return;
1612 		}
1613 	} else if (inp != NULL) {
1614 		if (type != SCTP_TIMER_TYPE_INPKILL) {
1615 			SCTP_INP_WLOCK(inp);
1616 		}
1617 	} else {
1618 		SCTP_WQ_ADDR_LOCK();
1619 	}
1620 	/* record in stopped_from which timeout type occurred */
1621 	tmr->stopped_from = type;
1622 
1623 	/* mark as being serviced now */
1624 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1625 		/*
1626 		 * Callout has been rescheduled.
1627 		 */
1628 		goto get_out;
1629 	}
1630 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1631 		/*
1632 		 * Not active, so no action.
1633 		 */
1634 		goto get_out;
1635 	}
1636 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1637 
1638 	/* call the handler for the appropriate timer type */
1639 	switch (type) {
1640 	case SCTP_TIMER_TYPE_ADDR_WQ:
1641 		sctp_handle_addr_wq();
1642 		break;
1643 	case SCTP_TIMER_TYPE_SEND:
1644 		if ((stcb == NULL) || (inp == NULL)) {
1645 			break;
1646 		}
1647 		SCTP_STAT_INCR(sctps_timodata);
1648 		stcb->asoc.timodata++;
1649 		stcb->asoc.num_send_timers_up--;
1650 		if (stcb->asoc.num_send_timers_up < 0) {
1651 			stcb->asoc.num_send_timers_up = 0;
1652 		}
1653 		SCTP_TCB_LOCK_ASSERT(stcb);
1654 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1655 			/* no need to unlock on tcb, it's gone */
1656 
1657 			goto out_decr;
1658 		}
1659 		SCTP_TCB_LOCK_ASSERT(stcb);
1660 #ifdef SCTP_AUDITING_ENABLED
1661 		sctp_auditing(4, inp, stcb, net);
1662 #endif
1663 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1664 		if ((stcb->asoc.num_send_timers_up == 0) &&
1665 		    (stcb->asoc.sent_queue_cnt > 0)) {
1666 			struct sctp_tmit_chunk *chk;
1667 
1668 			/*
1669 			 * Safeguard. If there are chunks on the sent queue
1670 			 * but no timers running, something is wrong... so we
1671 			 * start a timer on the first chunk on the sent queue,
1672 			 * on whatever net it is sent to.
1673 			 */
1674 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1675 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1676 			    chk->whoTo);
1677 		}
1678 		break;
1679 	case SCTP_TIMER_TYPE_INIT:
1680 		if ((stcb == NULL) || (inp == NULL)) {
1681 			break;
1682 		}
1683 		SCTP_STAT_INCR(sctps_timoinit);
1684 		stcb->asoc.timoinit++;
1685 		if (sctp_t1init_timer(inp, stcb, net)) {
1686 			/* no need to unlock on tcb, it's gone */
1687 			goto out_decr;
1688 		}
1689 		/* We do output but not here */
1690 		did_output = 0;
1691 		break;
1692 	case SCTP_TIMER_TYPE_RECV:
1693 		if ((stcb == NULL) || (inp == NULL)) {
1694 			break;
1695 		}
1696 		SCTP_STAT_INCR(sctps_timosack);
1697 		stcb->asoc.timosack++;
1698 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1699 #ifdef SCTP_AUDITING_ENABLED
1700 		sctp_auditing(4, inp, stcb, net);
1701 #endif
1702 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1703 		break;
1704 	case SCTP_TIMER_TYPE_SHUTDOWN:
1705 		if ((stcb == NULL) || (inp == NULL)) {
1706 			break;
1707 		}
1708 		if (sctp_shutdown_timer(inp, stcb, net)) {
1709 			/* no need to unlock on tcb, it's gone */
1710 			goto out_decr;
1711 		}
1712 		SCTP_STAT_INCR(sctps_timoshutdown);
1713 		stcb->asoc.timoshutdown++;
1714 #ifdef SCTP_AUDITING_ENABLED
1715 		sctp_auditing(4, inp, stcb, net);
1716 #endif
1717 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1718 		break;
1719 	case SCTP_TIMER_TYPE_HEARTBEAT:
1720 		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1721 			break;
1722 		}
1723 		SCTP_STAT_INCR(sctps_timoheartbeat);
1724 		stcb->asoc.timoheartbeat++;
1725 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1726 			/* no need to unlock on tcb, it's gone */
1727 			goto out_decr;
1728 		}
1729 #ifdef SCTP_AUDITING_ENABLED
1730 		sctp_auditing(4, inp, stcb, net);
1731 #endif
1732 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1733 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1734 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1735 		}
1736 		break;
1737 	case SCTP_TIMER_TYPE_COOKIE:
1738 		if ((stcb == NULL) || (inp == NULL)) {
1739 			break;
1740 		}
1741 		if (sctp_cookie_timer(inp, stcb, net)) {
1742 			/* no need to unlock the tcb, it's gone */
1743 			goto out_decr;
1744 		}
1745 		SCTP_STAT_INCR(sctps_timocookie);
1746 		stcb->asoc.timocookie++;
1747 #ifdef SCTP_AUDITING_ENABLED
1748 		sctp_auditing(4, inp, stcb, net);
1749 #endif
1750 		/*
1751 		 * We consider the T3 and cookie timers pretty much the same
1752 		 * with respect to the "from" location passed to chunk_output.
1753 		 */
1754 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1755 		break;
1756 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1757 		{
1758 			struct timeval tv;
1759 			int i, secret;
1760 
1761 			if (inp == NULL) {
1762 				break;
1763 			}
1764 			SCTP_STAT_INCR(sctps_timosecret);
1765 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1766 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1767 			inp->sctp_ep.last_secret_number =
1768 			    inp->sctp_ep.current_secret_number;
1769 			inp->sctp_ep.current_secret_number++;
1770 			if (inp->sctp_ep.current_secret_number >=
1771 			    SCTP_HOW_MANY_SECRETS) {
1772 				inp->sctp_ep.current_secret_number = 0;
1773 			}
1774 			secret = (int)inp->sctp_ep.current_secret_number;
1775 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1776 				inp->sctp_ep.secret_key[secret][i] =
1777 				    sctp_select_initial_TSN(&inp->sctp_ep);
1778 			}
1779 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1780 		}
1781 		did_output = 0;
1782 		break;
1783 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1784 		if ((stcb == NULL) || (inp == NULL)) {
1785 			break;
1786 		}
1787 		SCTP_STAT_INCR(sctps_timopathmtu);
1788 		sctp_pathmtu_timer(inp, stcb, net);
1789 		did_output = 0;
1790 		break;
1791 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1792 		if ((stcb == NULL) || (inp == NULL)) {
1793 			break;
1794 		}
1795 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1796 			/* no need to unlock the tcb, it's gone */
1797 			goto out_decr;
1798 		}
1799 		SCTP_STAT_INCR(sctps_timoshutdownack);
1800 		stcb->asoc.timoshutdownack++;
1801 #ifdef SCTP_AUDITING_ENABLED
1802 		sctp_auditing(4, inp, stcb, net);
1803 #endif
1804 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1805 		break;
1806 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1807 		if ((stcb == NULL) || (inp == NULL)) {
1808 			break;
1809 		}
1810 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1811 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1812 		    "Shutdown guard timer expired");
1813 		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
1814 		/* no need to unlock the tcb, it's gone */
1815 		goto out_decr;
1816 
1817 	case SCTP_TIMER_TYPE_STRRESET:
1818 		if ((stcb == NULL) || (inp == NULL)) {
1819 			break;
1820 		}
1821 		if (sctp_strreset_timer(inp, stcb, net)) {
1822 			/* no need to unlock the tcb, it's gone */
1823 			goto out_decr;
1824 		}
1825 		SCTP_STAT_INCR(sctps_timostrmrst);
1826 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1827 		break;
1828 	case SCTP_TIMER_TYPE_ASCONF:
1829 		if ((stcb == NULL) || (inp == NULL)) {
1830 			break;
1831 		}
1832 		if (sctp_asconf_timer(inp, stcb, net)) {
1833 			/* no need to unlock the tcb, it's gone */
1834 			goto out_decr;
1835 		}
1836 		SCTP_STAT_INCR(sctps_timoasconf);
1837 #ifdef SCTP_AUDITING_ENABLED
1838 		sctp_auditing(4, inp, stcb, net);
1839 #endif
1840 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1841 		break;
1842 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1843 		if ((stcb == NULL) || (inp == NULL)) {
1844 			break;
1845 		}
1846 		sctp_delete_prim_timer(inp, stcb, net);
1847 		SCTP_STAT_INCR(sctps_timodelprim);
1848 		break;
1849 
1850 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1851 		if ((stcb == NULL) || (inp == NULL)) {
1852 			break;
1853 		}
1854 		SCTP_STAT_INCR(sctps_timoautoclose);
1855 		sctp_autoclose_timer(inp, stcb, net);
1856 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1857 		did_output = 0;
1858 		break;
1859 	case SCTP_TIMER_TYPE_ASOCKILL:
1860 		if ((stcb == NULL) || (inp == NULL)) {
1861 			break;
1862 		}
1863 		SCTP_STAT_INCR(sctps_timoassockill);
1864 		/* Can we free it yet? */
1865 		SCTP_INP_DECR_REF(inp);
1866 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
1867 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1868 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1869 		so = SCTP_INP_SO(inp);
1870 		atomic_add_int(&stcb->asoc.refcnt, 1);
1871 		SCTP_TCB_UNLOCK(stcb);
1872 		SCTP_SOCKET_LOCK(so, 1);
1873 		SCTP_TCB_LOCK(stcb);
1874 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1875 #endif
1876 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1877 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1878 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1879 		SCTP_SOCKET_UNLOCK(so, 1);
1880 #endif
1881 		/*
1882 		 * sctp_free_assoc() always unlocks (or destroys) the tcb, so
1883 		 * prevent a duplicate unlock or an unlock of a freed mtx :-0
1884 		 */
1885 		stcb = NULL;
1886 		goto out_no_decr;
1887 	case SCTP_TIMER_TYPE_INPKILL:
1888 		SCTP_STAT_INCR(sctps_timoinpkill);
1889 		if (inp == NULL) {
1890 			break;
1891 		}
1892 		/*
1893 		 * special case, take away our increment since WE are the
1894 		 * killer
1895 		 */
1896 		SCTP_INP_DECR_REF(inp);
1897 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
1898 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1899 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1900 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1901 		inp = NULL;
1902 		goto out_no_decr;
1903 	default:
1904 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1905 		    type);
1906 		break;
1907 	}
1908 #ifdef SCTP_AUDITING_ENABLED
1909 	sctp_audit_log(0xF1, (uint8_t)type);
1910 	if (inp)
1911 		sctp_auditing(5, inp, stcb, net);
1912 #endif
1913 	if ((did_output) && stcb) {
1914 		/*
1915 		 * Now we need to clean up the control chunk chain if an
1916 		 * ECNE is on it. It must be marked as UNSENT again so the
1917 		 * next call will continue to send it until we get a CWR
1918 		 * to remove it. It is, however, unlikely that we will find
1919 		 * an ECN echo on the chain.
1920 		 */
1921 		sctp_fix_ecn_echo(&stcb->asoc);
1922 	}
1923 get_out:
1924 	if (stcb) {
1925 		SCTP_TCB_UNLOCK(stcb);
1926 	} else if (inp != NULL) {
1927 		SCTP_INP_WUNLOCK(inp);
1928 	} else {
1929 		SCTP_WQ_ADDR_UNLOCK();
1930 	}
1931 
1932 out_decr:
1933 	if (inp) {
1934 		SCTP_INP_DECR_REF(inp);
1935 	}
1936 out_no_decr:
1937 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
1938 	CURVNET_RESTORE();
1939 }
1940 
1941 void
1942 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1943     struct sctp_nets *net)
1944 {
1945 	uint32_t to_ticks;
1946 	struct sctp_timer *tmr;
1947 
1948 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1949 		return;
1950 
1951 	tmr = NULL;
1952 	if (stcb) {
1953 		SCTP_TCB_LOCK_ASSERT(stcb);
1954 	}
1955 	switch (t_type) {
1956 	case SCTP_TIMER_TYPE_ADDR_WQ:
1957 		/* Only 1 tick away :-) */
1958 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1959 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1960 		break;
1961 	case SCTP_TIMER_TYPE_SEND:
1962 		/* Here we use the RTO timer */
1963 		{
1964 			int rto_val;
1965 
1966 			if ((stcb == NULL) || (net == NULL)) {
1967 				return;
1968 			}
1969 			tmr = &net->rxt_timer;
1970 			if (net->RTO == 0) {
1971 				rto_val = stcb->asoc.initial_rto;
1972 			} else {
1973 				rto_val = net->RTO;
1974 			}
1975 			to_ticks = MSEC_TO_TICKS(rto_val);
1976 		}
1977 		break;
1978 	case SCTP_TIMER_TYPE_INIT:
1979 		/*
1980 		 * Here we use the INIT timer default, usually about 1
1981 		 * minute.
1982 		 */
1983 		if ((stcb == NULL) || (net == NULL)) {
1984 			return;
1985 		}
1986 		tmr = &net->rxt_timer;
1987 		if (net->RTO == 0) {
1988 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1989 		} else {
1990 			to_ticks = MSEC_TO_TICKS(net->RTO);
1991 		}
1992 		break;
1993 	case SCTP_TIMER_TYPE_RECV:
1994 		/*
1995 		 * Here we use the Delayed-Ack timer value from the inp,
1996 		 * usually about 200 ms.
1997 		 */
1998 		if (stcb == NULL) {
1999 			return;
2000 		}
2001 		tmr = &stcb->asoc.dack_timer;
2002 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2003 		break;
2004 	case SCTP_TIMER_TYPE_SHUTDOWN:
2005 		/* Here we use the RTO of the destination. */
2006 		if ((stcb == NULL) || (net == NULL)) {
2007 			return;
2008 		}
2009 		if (net->RTO == 0) {
2010 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2011 		} else {
2012 			to_ticks = MSEC_TO_TICKS(net->RTO);
2013 		}
2014 		tmr = &net->rxt_timer;
2015 		break;
2016 	case SCTP_TIMER_TYPE_HEARTBEAT:
2017 		/*
2018 		 * The net is used here so that we can add in the RTO, even
2019 		 * though we use a different timer. We also add in the HB
2020 		 * delay plus a random jitter.
2021 		 */
2022 		if ((stcb == NULL) || (net == NULL)) {
2023 			return;
2024 		} else {
2025 			uint32_t rndval;
2026 			uint32_t jitter;
2027 
2028 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2029 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2030 				return;
2031 			}
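			/*
			 * Rough illustration (editor's example, not from the
			 * spec): with an RTO of 1000 ms, the jitter
			 * computation below yields a value between roughly
			 * 500 ms and 1500 ms, i.e. RTO +/- 50%, before the
			 * configured heartbeat delay is added in.
			 */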
2032 			if (net->RTO == 0) {
2033 				to_ticks = stcb->asoc.initial_rto;
2034 			} else {
2035 				to_ticks = net->RTO;
2036 			}
2037 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2038 			jitter = rndval % to_ticks;
2039 			if (jitter >= (to_ticks >> 1)) {
2040 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2041 			} else {
2042 				to_ticks = to_ticks - jitter;
2043 			}
2044 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2045 			    !(net->dest_state & SCTP_ADDR_PF)) {
2046 				to_ticks += net->heart_beat_delay;
2047 			}
2048 			/*
2049 			 * Now we must convert to_ticks, which is currently
2050 			 * in ms, to ticks.
2051 			 */
2052 			to_ticks = MSEC_TO_TICKS(to_ticks);
2053 			tmr = &net->hb_timer;
2054 		}
2055 		break;
2056 	case SCTP_TIMER_TYPE_COOKIE:
2057 		/*
2058 		 * Here we can use the RTO timer from the network since one
2059 		 * RTT was complete. If a retransmission happened then we will
2060 		 * be using the RTO initial value.
2061 		 */
2062 		if ((stcb == NULL) || (net == NULL)) {
2063 			return;
2064 		}
2065 		if (net->RTO == 0) {
2066 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2067 		} else {
2068 			to_ticks = MSEC_TO_TICKS(net->RTO);
2069 		}
2070 		tmr = &net->rxt_timer;
2071 		break;
2072 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2073 		/*
2074 		 * Nothing needed but the endpoint here; usually about 60
2075 		 * minutes.
2076 		 */
2077 		tmr = &inp->sctp_ep.signature_change;
2078 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2079 		break;
2080 	case SCTP_TIMER_TYPE_ASOCKILL:
2081 		if (stcb == NULL) {
2082 			return;
2083 		}
2084 		tmr = &stcb->asoc.strreset_timer;
2085 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2086 		break;
2087 	case SCTP_TIMER_TYPE_INPKILL:
2088 		/*
2089 		 * The inp is set up to die. We re-use the signature_change
2090 		 * timer since that has stopped and we are in the GONE
2091 		 * state.
2092 		 */
2093 		tmr = &inp->sctp_ep.signature_change;
2094 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2095 		break;
2096 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2097 		/*
2098 		 * Here we use the value found in the EP for PMTU, usually
2099 		 * about 10 minutes.
2100 		 */
2101 		if ((stcb == NULL) || (net == NULL)) {
2102 			return;
2103 		}
2104 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2105 			return;
2106 		}
2107 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2108 		tmr = &net->pmtu_timer;
2109 		break;
2110 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2111 		/* Here we use the RTO of the destination */
2112 		if ((stcb == NULL) || (net == NULL)) {
2113 			return;
2114 		}
2115 		if (net->RTO == 0) {
2116 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2117 		} else {
2118 			to_ticks = MSEC_TO_TICKS(net->RTO);
2119 		}
2120 		tmr = &net->rxt_timer;
2121 		break;
2122 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2123 		/*
2124 		 * Here we use the endpoint's shutdown guard timer, usually
2125 		 * about 3 minutes.
2126 		 */
2127 		if (stcb == NULL) {
2128 			return;
2129 		}
2130 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2131 			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2132 		} else {
2133 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2134 		}
2135 		tmr = &stcb->asoc.shut_guard_timer;
2136 		break;
2137 	case SCTP_TIMER_TYPE_STRRESET:
2138 		/*
2139 		 * Here the timer comes from the stcb but its value is from
2140 		 * the net's RTO.
2141 		 */
2142 		if ((stcb == NULL) || (net == NULL)) {
2143 			return;
2144 		}
2145 		if (net->RTO == 0) {
2146 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2147 		} else {
2148 			to_ticks = MSEC_TO_TICKS(net->RTO);
2149 		}
2150 		tmr = &stcb->asoc.strreset_timer;
2151 		break;
2152 	case SCTP_TIMER_TYPE_ASCONF:
2153 		/*
2154 		 * Here the timer comes from the stcb but its value is from
2155 		 * the net's RTO.
2156 		 */
2157 		if ((stcb == NULL) || (net == NULL)) {
2158 			return;
2159 		}
2160 		if (net->RTO == 0) {
2161 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2162 		} else {
2163 			to_ticks = MSEC_TO_TICKS(net->RTO);
2164 		}
2165 		tmr = &stcb->asoc.asconf_timer;
2166 		break;
2167 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2168 		if ((stcb == NULL) || (net != NULL)) {
2169 			return;
2170 		}
2171 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2172 		tmr = &stcb->asoc.delete_prim_timer;
2173 		break;
2174 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2175 		if (stcb == NULL) {
2176 			return;
2177 		}
2178 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2179 			/*
2180 			 * Really an error since stcb is NOT set to
2181 			 * autoclose
2182 			 */
2183 			return;
2184 		}
2185 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2186 		tmr = &stcb->asoc.autoclose_timer;
2187 		break;
2188 	default:
2189 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2190 		    __func__, t_type);
2191 		return;
2192 		break;
2193 	}
2194 	if ((to_ticks <= 0) || (tmr == NULL)) {
2195 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2196 		    __func__, t_type, to_ticks, (void *)tmr);
2197 		return;
2198 	}
2199 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2200 		/*
2201 		 * We do NOT allow the timer to already be running; if it is,
2202 		 * we leave the current one up unchanged.
2203 		 */
2204 		return;
2205 	}
2206 	/* At this point we can proceed */
2207 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2208 		stcb->asoc.num_send_timers_up++;
2209 	}
2210 	tmr->stopped_from = 0;
2211 	tmr->type = t_type;
2212 	tmr->ep = (void *)inp;
2213 	tmr->tcb = (void *)stcb;
2214 	tmr->net = (void *)net;
2215 	tmr->self = (void *)tmr;
2216 	tmr->vnet = (void *)curvnet;
2217 	tmr->ticks = sctp_get_tick_count();
2218 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2219 	return;
2220 }
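/*
 * Usage note (summarizing the checks above, not new requirements): callers
 * of sctp_timer_start() must hold the TCB lock whenever an stcb is passed,
 * and starting a timer that is already pending is a no-op -- the running
 * instance is left untouched. A minimal call site, as in the T3 safeguard
 * in sctp_timeout_handler() above:
 *
 *	sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
 */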
2221 
2222 void
2223 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2224     struct sctp_nets *net, uint32_t from)
2225 {
2226 	struct sctp_timer *tmr;
2227 
2228 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2229 	    (inp == NULL))
2230 		return;
2231 
2232 	tmr = NULL;
2233 	if (stcb) {
2234 		SCTP_TCB_LOCK_ASSERT(stcb);
2235 	}
2236 	switch (t_type) {
2237 	case SCTP_TIMER_TYPE_ADDR_WQ:
2238 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2239 		break;
2240 	case SCTP_TIMER_TYPE_SEND:
2241 		if ((stcb == NULL) || (net == NULL)) {
2242 			return;
2243 		}
2244 		tmr = &net->rxt_timer;
2245 		break;
2246 	case SCTP_TIMER_TYPE_INIT:
2247 		if ((stcb == NULL) || (net == NULL)) {
2248 			return;
2249 		}
2250 		tmr = &net->rxt_timer;
2251 		break;
2252 	case SCTP_TIMER_TYPE_RECV:
2253 		if (stcb == NULL) {
2254 			return;
2255 		}
2256 		tmr = &stcb->asoc.dack_timer;
2257 		break;
2258 	case SCTP_TIMER_TYPE_SHUTDOWN:
2259 		if ((stcb == NULL) || (net == NULL)) {
2260 			return;
2261 		}
2262 		tmr = &net->rxt_timer;
2263 		break;
2264 	case SCTP_TIMER_TYPE_HEARTBEAT:
2265 		if ((stcb == NULL) || (net == NULL)) {
2266 			return;
2267 		}
2268 		tmr = &net->hb_timer;
2269 		break;
2270 	case SCTP_TIMER_TYPE_COOKIE:
2271 		if ((stcb == NULL) || (net == NULL)) {
2272 			return;
2273 		}
2274 		tmr = &net->rxt_timer;
2275 		break;
2276 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2277 		/* nothing needed but the endpoint here */
2278 		tmr = &inp->sctp_ep.signature_change;
2279 		/*
2280 		 * We re-use the newcookie timer for the INP kill timer. We
2281 		 * must ensure that we do not kill it by accident.
2282 		 */
2283 		break;
2284 	case SCTP_TIMER_TYPE_ASOCKILL:
2285 		/*
2286 		 * Stop the asoc kill timer.
2287 		 */
2288 		if (stcb == NULL) {
2289 			return;
2290 		}
2291 		tmr = &stcb->asoc.strreset_timer;
2292 		break;
2293 
2294 	case SCTP_TIMER_TYPE_INPKILL:
2295 		/*
2296 		 * The inp is set up to die. We re-use the signature_change
2297 		 * timer since that has stopped and we are in the GONE
2298 		 * state.
2299 		 */
2300 		tmr = &inp->sctp_ep.signature_change;
2301 		break;
2302 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2303 		if ((stcb == NULL) || (net == NULL)) {
2304 			return;
2305 		}
2306 		tmr = &net->pmtu_timer;
2307 		break;
2308 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2309 		if ((stcb == NULL) || (net == NULL)) {
2310 			return;
2311 		}
2312 		tmr = &net->rxt_timer;
2313 		break;
2314 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2315 		if (stcb == NULL) {
2316 			return;
2317 		}
2318 		tmr = &stcb->asoc.shut_guard_timer;
2319 		break;
2320 	case SCTP_TIMER_TYPE_STRRESET:
2321 		if (stcb == NULL) {
2322 			return;
2323 		}
2324 		tmr = &stcb->asoc.strreset_timer;
2325 		break;
2326 	case SCTP_TIMER_TYPE_ASCONF:
2327 		if (stcb == NULL) {
2328 			return;
2329 		}
2330 		tmr = &stcb->asoc.asconf_timer;
2331 		break;
2332 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2333 		if (stcb == NULL) {
2334 			return;
2335 		}
2336 		tmr = &stcb->asoc.delete_prim_timer;
2337 		break;
2338 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2339 		if (stcb == NULL) {
2340 			return;
2341 		}
2342 		tmr = &stcb->asoc.autoclose_timer;
2343 		break;
2344 	default:
2345 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2346 		    __func__, t_type);
2347 		break;
2348 	}
2349 	if (tmr == NULL) {
2350 		return;
2351 	}
2352 	if ((tmr->type != t_type) && tmr->type) {
2353 		/*
2354 		 * OK, we have a timer that is under joint use; the cookie
2355 		 * timer, for instance, shares storage with the SEND timer.
2356 		 * We therefore are NOT running the timer that the caller
2357 		 * wants stopped, so just return.
2358 		 */
2359 		return;
2360 	}
2361 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2362 		stcb->asoc.num_send_timers_up--;
2363 		if (stcb->asoc.num_send_timers_up < 0) {
2364 			stcb->asoc.num_send_timers_up = 0;
2365 		}
2366 	}
2367 	tmr->self = NULL;
2368 	tmr->stopped_from = from;
2369 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2370 	return;
2371 }
2372 
2373 uint32_t
2374 sctp_calculate_len(struct mbuf *m)
2375 {
2376 	uint32_t tlen = 0;
2377 	struct mbuf *at;
2378 
2379 	at = m;
2380 	while (at) {
2381 		tlen += SCTP_BUF_LEN(at);
2382 		at = SCTP_BUF_NEXT(at);
2383 	}
2384 	return (tlen);
2385 }
2386 
2387 void
2388 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2389     struct sctp_association *asoc, uint32_t mtu)
2390 {
2391 	/*
2392 	 * Reset the P-MTU size on this association. This involves changing
2393 	 * the asoc MTU and going through ANY chunk+overhead larger than mtu
2394 	 * to allow the DF flag to be cleared.
2395 	 */
2396 	struct sctp_tmit_chunk *chk;
2397 	unsigned int eff_mtu, ovh;
2398 
2399 	asoc->smallest_mtu = mtu;
2400 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2401 		ovh = SCTP_MIN_OVERHEAD;
2402 	} else {
2403 		ovh = SCTP_MIN_V4_OVERHEAD;
2404 	}
2405 	eff_mtu = mtu - ovh;
2406 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2407 		if (chk->send_size > eff_mtu) {
2408 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2409 		}
2410 	}
2411 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2412 		if (chk->send_size > eff_mtu) {
2413 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2414 		}
2415 	}
2416 }
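/*
 * Worked example (editor's illustration, assuming the usual header sizes):
 * for an IPv4-only association with mtu = 1500, ovh is SCTP_MIN_V4_OVERHEAD
 * (20-byte IP header + 12-byte SCTP common header = 32), so eff_mtu = 1468
 * and any queued chunk whose send_size exceeds 1468 is marked
 * CHUNK_FLAGS_FRAGMENT_OK so it may be sent with DF cleared.
 */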
2417 
2418 
2419 /*
2420  * Given an association and the starting time of the current RTT period,
2421  * return the RTO in msecs. net should point to the current network.
2422  */
2423 
2424 uint32_t
2425 sctp_calculate_rto(struct sctp_tcb *stcb,
2426     struct sctp_association *asoc,
2427     struct sctp_nets *net,
2428     struct timeval *old,
2429     int rtt_from_sack)
2430 {
2431 	/*-
2432 	 * given an association and the starting time of the current RTT
2433 	 * period (in *old), return the RTO in number of msecs.
2434 	 */
2435 	int32_t rtt;		/* RTT in ms */
2436 	uint32_t new_rto;
2437 	int first_measure = 0;
2438 	struct timeval now;
2439 
2440 	/************************/
2441 	/* 1. calculate new RTT */
2442 	/************************/
2443 	/* get the current time */
2444 	if (stcb->asoc.use_precise_time) {
2445 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2446 	} else {
2447 		(void)SCTP_GETTIME_TIMEVAL(&now);
2448 	}
2449 	timevalsub(&now, old);
2450 	/* store the current RTT in us */
2451 	net->rtt = (uint64_t)1000000 * (uint64_t)now.tv_sec +
2452 	    (uint64_t)now.tv_usec;
2453 
2454 	/* compute rtt in ms */
2455 	rtt = (int32_t)(net->rtt / 1000);
2456 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2457 		/*
2458 		 * Tell the CC module that a new update has just occurred
2459 		 * from a sack
2460 		 */
2461 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2462 	}
2463 	/*
2464 	 * Do we need to determine the LAN type? We do this only on SACKs,
2465 	 * i.e. RTT determined from data, not non-data (HB/INIT->INITACK).
2466 	 */
2467 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2468 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2469 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2470 			net->lan_type = SCTP_LAN_INTERNET;
2471 		} else {
2472 			net->lan_type = SCTP_LAN_LOCAL;
2473 		}
2474 	}
2475 	/***************************/
2476 	/* 2. update RTTVAR & SRTT */
2477 	/***************************/
2478 	/*-
2479 	 * Compute the scaled average lastsa and the
2480 	 * scaled variance lastsv as described in van Jacobson
2481 	 * Paper "Congestion Avoidance and Control", Annex A.
2482 	 *
2483 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2484 	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
2485 	 */
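	/*
	 * Put differently (assuming SCTP_RTT_SHIFT == 3 and
	 * SCTP_RTT_VAR_SHIFT == 2): lastsa holds 8 * SRTT and lastsv holds
	 * 4 * RTTVAR, so the update below is the classic
	 * SRTT += (rtt - SRTT) / 8 and RTTVAR += (|rtt - SRTT| - RTTVAR) / 4,
	 * and new_rto = (lastsa >> 3) + lastsv works out to
	 * SRTT + 4 * RTTVAR, as in RFC 4960, Section 6.3.1.
	 */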
2486 	if (net->RTO_measured) {
2487 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2488 		net->lastsa += rtt;
2489 		if (rtt < 0) {
2490 			rtt = -rtt;
2491 		}
2492 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2493 		net->lastsv += rtt;
2494 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2495 			rto_logging(net, SCTP_LOG_RTTVAR);
2496 		}
2497 	} else {
2498 		/* First RTO measurement */
2499 		net->RTO_measured = 1;
2500 		first_measure = 1;
2501 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2502 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2503 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2504 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2505 		}
2506 	}
2507 	if (net->lastsv == 0) {
2508 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2509 	}
2510 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2511 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2512 	    (stcb->asoc.sat_network_lockout == 0)) {
2513 		stcb->asoc.sat_network = 1;
2514 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2515 		stcb->asoc.sat_network = 0;
2516 		stcb->asoc.sat_network_lockout = 1;
2517 	}
2518 	/* bound it, per C6/C7 in RFC 4960, Section 6.3.1 */
2519 	if (new_rto < stcb->asoc.minrto) {
2520 		new_rto = stcb->asoc.minrto;
2521 	}
2522 	if (new_rto > stcb->asoc.maxrto) {
2523 		new_rto = stcb->asoc.maxrto;
2524 	}
2525 	/* we are now returning the RTO */
2526 	return (new_rto);
2527 }
2528 
2529 /*
2530  * return a pointer to a contiguous piece of data from the given mbuf chain
2531  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2532  * one mbuf, a copy is made at 'ptr'. The caller must ensure that the buffer
2533  * size is >= 'len'. Returns NULL if there aren't 'len' bytes in the chain.
2534  */
2535 caddr_t
2536 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
2537 {
2538 	uint32_t count;
2539 	uint8_t *ptr;
2540 
2541 	ptr = in_ptr;
2542 	if ((off < 0) || (len <= 0))
2543 		return (NULL);
2544 
2545 	/* find the desired start location */
2546 	while ((m != NULL) && (off > 0)) {
2547 		if (off < SCTP_BUF_LEN(m))
2548 			break;
2549 		off -= SCTP_BUF_LEN(m);
2550 		m = SCTP_BUF_NEXT(m);
2551 	}
2552 	if (m == NULL)
2553 		return (NULL);
2554 
2555 	/* is the current mbuf large enough (eg. contiguous)? */
2556 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2557 		return (mtod(m, caddr_t)+off);
2558 	} else {
2559 		/* else, it spans more than one mbuf, so save a temp copy... */
2560 		while ((m != NULL) && (len > 0)) {
2561 			count = min(SCTP_BUF_LEN(m) - off, len);
2562 			memcpy(ptr, mtod(m, caddr_t)+off, count);
2563 			len -= count;
2564 			ptr += count;
2565 			off = 0;
2566 			m = SCTP_BUF_NEXT(m);
2567 		}
2568 		if ((m == NULL) && (len > 0))
2569 			return (NULL);
2570 		else
2571 			return ((caddr_t)in_ptr);
2572 	}
2573 }
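/*
 * Hypothetical usage sketch (locals are illustrative, not from this file):
 * a caller that needs a parameter header at an arbitrary offset would do
 *
 *	struct sctp_paramhdr buf, *ph;
 *
 *	ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset,
 *	    sizeof(struct sctp_paramhdr), (uint8_t *)&buf);
 *
 * If the requested bytes are contiguous, ph points into the mbuf data
 * itself; otherwise they are copied into buf and ph points at that local
 * copy. A NULL return means the chain does not hold 'len' bytes at 'off'.
 */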
2574 
2575 
2576 
2577 struct sctp_paramhdr *
2578 sctp_get_next_param(struct mbuf *m,
2579     int offset,
2580     struct sctp_paramhdr *pull,
2581     int pull_limit)
2582 {
2583 	/* This just provides a typed signature to Peter's Pull routine */
2584 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2585 	    (uint8_t *)pull));
2586 }
2587 
2588 
2589 struct mbuf *
2590 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2591 {
2592 	struct mbuf *m_last;
2593 	caddr_t dp;
2594 
2595 	if (padlen > 3) {
2596 		return (NULL);
2597 	}
2598 	if (padlen <= M_TRAILINGSPACE(m)) {
2599 		/*
2600 		 * The easy way. We hope the majority of the time we hit
2601 		 * here :)
2602 		 */
2603 		m_last = m;
2604 	} else {
2605 		/* Hard way we must grow the mbuf chain */
2606 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2607 		if (m_last == NULL) {
2608 			return (NULL);
2609 		}
2610 		SCTP_BUF_LEN(m_last) = 0;
2611 		SCTP_BUF_NEXT(m_last) = NULL;
2612 		SCTP_BUF_NEXT(m) = m_last;
2613 	}
2614 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2615 	SCTP_BUF_LEN(m_last) += padlen;
2616 	memset(dp, 0, padlen);
2617 	return (m_last);
2618 }
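/*
 * Example (editor's note): SCTP chunks are padded to a 4-byte boundary, so
 * for a chunk whose length is 23 bytes the caller would request padlen = 1;
 * a padlen larger than 3 is rejected above since it can never be needed.
 */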
2619 
2620 struct mbuf *
2621 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2622 {
2623 	/* find the last mbuf in chain and pad it */
2624 	struct mbuf *m_at;
2625 
2626 	if (last_mbuf != NULL) {
2627 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2628 	} else {
2629 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2630 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2631 				return (sctp_add_pad_tombuf(m_at, padval));
2632 			}
2633 		}
2634 	}
2635 	return (NULL);
2636 }
2637 
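/*
 * The sctp_notify_*() helpers below generally follow the same pattern:
 * allocate an mbuf, fill in the notification structure, wrap it in a
 * sctp_queued_to_read entry via sctp_build_readq_entry(), and append it to
 * the socket receive buffer (usually with sctp_add_to_readq()) flagged as
 * M_NOTIFICATION so the application reads it as a notification.
 */
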
2638 static void
2639 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2640     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2641 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2642     SCTP_UNUSED
2643 #endif
2644 )
2645 {
2646 	struct mbuf *m_notify;
2647 	struct sctp_assoc_change *sac;
2648 	struct sctp_queued_to_read *control;
2649 	unsigned int notif_len;
2650 	uint16_t abort_len;
2651 	unsigned int i;
2652 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2653 	struct socket *so;
2654 #endif
2655 
2656 	if (stcb == NULL) {
2657 		return;
2658 	}
2659 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2660 		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2661 		if (abort != NULL) {
2662 			abort_len = ntohs(abort->ch.chunk_length);
2663 			/*
2664 			 * Only SCTP_CHUNK_BUFFER_SIZE bytes are guaranteed to be
2665 			 * contiguous.
2666 			 */
2667 			if (abort_len > SCTP_CHUNK_BUFFER_SIZE) {
2668 				abort_len = SCTP_CHUNK_BUFFER_SIZE;
2669 			}
2670 		} else {
2671 			abort_len = 0;
2672 		}
2673 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2674 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2675 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2676 			notif_len += abort_len;
2677 		}
2678 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2679 		if (m_notify == NULL) {
2680 			/* Retry with smaller value. */
2681 			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2682 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2683 			if (m_notify == NULL) {
2684 				goto set_error;
2685 			}
2686 		}
2687 		SCTP_BUF_NEXT(m_notify) = NULL;
2688 		sac = mtod(m_notify, struct sctp_assoc_change *);
2689 		memset(sac, 0, notif_len);
2690 		sac->sac_type = SCTP_ASSOC_CHANGE;
2691 		sac->sac_flags = 0;
2692 		sac->sac_length = sizeof(struct sctp_assoc_change);
2693 		sac->sac_state = state;
2694 		sac->sac_error = error;
2695 		/* XXX verify these stream counts */
2696 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2697 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2698 		sac->sac_assoc_id = sctp_get_associd(stcb);
2699 		if (notif_len > sizeof(struct sctp_assoc_change)) {
2700 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2701 				i = 0;
2702 				if (stcb->asoc.prsctp_supported == 1) {
2703 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2704 				}
2705 				if (stcb->asoc.auth_supported == 1) {
2706 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2707 				}
2708 				if (stcb->asoc.asconf_supported == 1) {
2709 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2710 				}
2711 				if (stcb->asoc.idata_supported == 1) {
2712 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
2713 				}
2714 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2715 				if (stcb->asoc.reconfig_supported == 1) {
2716 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2717 				}
2718 				sac->sac_length += i;
2719 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2720 				memcpy(sac->sac_info, abort, abort_len);
2721 				sac->sac_length += abort_len;
2722 			}
2723 		}
2724 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2725 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2726 		    0, 0, stcb->asoc.context, 0, 0, 0,
2727 		    m_notify);
2728 		if (control != NULL) {
2729 			control->length = SCTP_BUF_LEN(m_notify);
2730 			control->spec_flags = M_NOTIFICATION;
2731 			/* not that we need this */
2732 			control->tail_mbuf = m_notify;
2733 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2734 			    control,
2735 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2736 			    so_locked);
2737 		} else {
2738 			sctp_m_freem(m_notify);
2739 		}
2740 	}
2741 	/*
2742 	 * For 1-to-1 style sockets, we send up an error when an ABORT
2743 	 * comes in.
2744 	 */
2745 set_error:
2746 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2747 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2748 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2749 		SOCK_LOCK(stcb->sctp_socket);
2750 		if (from_peer) {
2751 			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2752 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2753 				stcb->sctp_socket->so_error = ECONNREFUSED;
2754 			} else {
2755 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2756 				stcb->sctp_socket->so_error = ECONNRESET;
2757 			}
2758 		} else {
2759 			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
2760 			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
2761 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
2762 				stcb->sctp_socket->so_error = ETIMEDOUT;
2763 			} else {
2764 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2765 				stcb->sctp_socket->so_error = ECONNABORTED;
2766 			}
2767 		}
2768 		SOCK_UNLOCK(stcb->sctp_socket);
2769 	}
2770 	/* Wake ANY sleepers */
2771 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2772 	so = SCTP_INP_SO(stcb->sctp_ep);
2773 	if (!so_locked) {
2774 		atomic_add_int(&stcb->asoc.refcnt, 1);
2775 		SCTP_TCB_UNLOCK(stcb);
2776 		SCTP_SOCKET_LOCK(so, 1);
2777 		SCTP_TCB_LOCK(stcb);
2778 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2779 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2780 			SCTP_SOCKET_UNLOCK(so, 1);
2781 			return;
2782 		}
2783 	}
2784 #endif
2785 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2786 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2787 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2788 		socantrcvmore(stcb->sctp_socket);
2789 	}
2790 	sorwakeup(stcb->sctp_socket);
2791 	sowwakeup(stcb->sctp_socket);
2792 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2793 	if (!so_locked) {
2794 		SCTP_SOCKET_UNLOCK(so, 1);
2795 	}
2796 #endif
2797 }
2798 
2799 static void
2800 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2801     struct sockaddr *sa, uint32_t error, int so_locked
2802 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2803     SCTP_UNUSED
2804 #endif
2805 )
2806 {
2807 	struct mbuf *m_notify;
2808 	struct sctp_paddr_change *spc;
2809 	struct sctp_queued_to_read *control;
2810 
2811 	if ((stcb == NULL) ||
2812 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2813 		/* event not enabled */
2814 		return;
2815 	}
2816 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2817 	if (m_notify == NULL)
2818 		return;
2819 	SCTP_BUF_LEN(m_notify) = 0;
2820 	spc = mtod(m_notify, struct sctp_paddr_change *);
2821 	memset(spc, 0, sizeof(struct sctp_paddr_change));
2822 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2823 	spc->spc_flags = 0;
2824 	spc->spc_length = sizeof(struct sctp_paddr_change);
2825 	switch (sa->sa_family) {
2826 #ifdef INET
2827 	case AF_INET:
2828 #ifdef INET6
2829 		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
2830 			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
2831 			    (struct sockaddr_in6 *)&spc->spc_aaddr);
2832 		} else {
2833 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2834 		}
2835 #else
2836 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2837 #endif
2838 		break;
2839 #endif
2840 #ifdef INET6
2841 	case AF_INET6:
2842 		{
2843 			struct sockaddr_in6 *sin6;
2844 
2845 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2846 
2847 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2848 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2849 				if (sin6->sin6_scope_id == 0) {
2850 					/* recover scope_id for user */
2851 					(void)sa6_recoverscope(sin6);
2852 				} else {
2853 					/* clear embedded scope_id for user */
2854 					in6_clearscope(&sin6->sin6_addr);
2855 				}
2856 			}
2857 			break;
2858 		}
2859 #endif
2860 	default:
2861 		/* TSNH */
2862 		break;
2863 	}
2864 	spc->spc_state = state;
2865 	spc->spc_error = error;
2866 	spc->spc_assoc_id = sctp_get_associd(stcb);
2867 
2868 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2869 	SCTP_BUF_NEXT(m_notify) = NULL;
2870 
2871 	/* append to socket */
2872 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2873 	    0, 0, stcb->asoc.context, 0, 0, 0,
2874 	    m_notify);
2875 	if (control == NULL) {
2876 		/* no memory */
2877 		sctp_m_freem(m_notify);
2878 		return;
2879 	}
2880 	control->length = SCTP_BUF_LEN(m_notify);
2881 	control->spec_flags = M_NOTIFICATION;
2882 	/* not that we need this */
2883 	control->tail_mbuf = m_notify;
2884 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2885 	    control,
2886 	    &stcb->sctp_socket->so_rcv, 1,
2887 	    SCTP_READ_LOCK_NOT_HELD,
2888 	    so_locked);
2889 }
2890 
2891 
2892 static void
2893 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2894     struct sctp_tmit_chunk *chk, int so_locked
2895 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2896     SCTP_UNUSED
2897 #endif
2898 )
2899 {
2900 	struct mbuf *m_notify;
2901 	struct sctp_send_failed *ssf;
2902 	struct sctp_send_failed_event *ssfe;
2903 	struct sctp_queued_to_read *control;
2904 	struct sctp_chunkhdr *chkhdr;
2905 	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;
2906 
2907 	if ((stcb == NULL) ||
2908 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2909 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2910 		/* event not enabled */
2911 		return;
2912 	}
2913 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2914 		notifhdr_len = sizeof(struct sctp_send_failed_event);
2915 	} else {
2916 		notifhdr_len = sizeof(struct sctp_send_failed);
2917 	}
2918 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
2919 	if (m_notify == NULL)
2920 		/* no space left */
2921 		return;
2922 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
2923 	if (stcb->asoc.idata_supported) {
2924 		chkhdr_len = sizeof(struct sctp_idata_chunk);
2925 	} else {
2926 		chkhdr_len = sizeof(struct sctp_data_chunk);
2927 	}
2928 	/* Use some defaults in case we can't access the chunk header */
2929 	if (chk->send_size >= chkhdr_len) {
2930 		payload_len = chk->send_size - chkhdr_len;
2931 	} else {
2932 		payload_len = 0;
2933 	}
2934 	padding_len = 0;
2935 	if (chk->data != NULL) {
2936 		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
2937 		if (chkhdr != NULL) {
2938 			chk_len = ntohs(chkhdr->chunk_length);
2939 			if ((chk_len >= chkhdr_len) &&
2940 			    (chk->send_size >= chk_len) &&
2941 			    (chk->send_size - chk_len < 4)) {
2942 				padding_len = chk->send_size - chk_len;
2943 				payload_len = chk->send_size - chkhdr_len - padding_len;
2944 			}
2945 		}
2946 	}
2947 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2948 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2949 		memset(ssfe, 0, notifhdr_len);
2950 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2951 		if (sent) {
2952 			ssfe->ssfe_flags = SCTP_DATA_SENT;
2953 		} else {
2954 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2955 		}
2956 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
2957 		ssfe->ssfe_error = error;
2958 		/* not exactly what the user sent in, but should be close :) */
2959 		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
2960 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2961 		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
2962 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2963 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2964 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2965 	} else {
2966 		ssf = mtod(m_notify, struct sctp_send_failed *);
2967 		memset(ssf, 0, notifhdr_len);
2968 		ssf->ssf_type = SCTP_SEND_FAILED;
2969 		if (sent) {
2970 			ssf->ssf_flags = SCTP_DATA_SENT;
2971 		} else {
2972 			ssf->ssf_flags = SCTP_DATA_UNSENT;
2973 		}
2974 		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
2975 		ssf->ssf_error = error;
2976 		/* not exactly what the user sent in, but should be close :) */
2977 		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
2978 		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
2979 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
2980 		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
2981 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
2982 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2983 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
2984 	}
2985 	if (chk->data != NULL) {
2986 		/* Trim off the sctp chunk header (it should be there) */
2987 		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
2988 			m_adj(chk->data, chkhdr_len);
2989 			m_adj(chk->data, -padding_len);
2990 			sctp_mbuf_crush(chk->data);
2991 			chk->send_size -= (chkhdr_len + padding_len);
2992 		}
2993 	}
2994 	SCTP_BUF_NEXT(m_notify) = chk->data;
2995 	/* Steal off the mbuf */
2996 	chk->data = NULL;
2997 	/*
2998 	 * For this case, we check the actual socket buffer; since the assoc
2999 	 * is going away, we don't want to overfill the socket buffer for a
3000 	 * non-reader.
3001 	 */
3002 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3003 		sctp_m_freem(m_notify);
3004 		return;
3005 	}
3006 	/* append to socket */
3007 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3008 	    0, 0, stcb->asoc.context, 0, 0, 0,
3009 	    m_notify);
3010 	if (control == NULL) {
3011 		/* no memory */
3012 		sctp_m_freem(m_notify);
3013 		return;
3014 	}
3015 	control->length = SCTP_BUF_LEN(m_notify);
3016 	control->spec_flags = M_NOTIFICATION;
3017 	/* not that we need this */
3018 	control->tail_mbuf = m_notify;
3019 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3020 	    control,
3021 	    &stcb->sctp_socket->so_rcv, 1,
3022 	    SCTP_READ_LOCK_NOT_HELD,
3023 	    so_locked);
3024 }
3025 
3026 
3027 static void
3028 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3029     struct sctp_stream_queue_pending *sp, int so_locked
3030 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3031     SCTP_UNUSED
3032 #endif
3033 )
3034 {
3035 	struct mbuf *m_notify;
3036 	struct sctp_send_failed *ssf;
3037 	struct sctp_send_failed_event *ssfe;
3038 	struct sctp_queued_to_read *control;
3039 	int notifhdr_len;
3040 
3041 	if ((stcb == NULL) ||
3042 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3043 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3044 		/* event not enabled */
3045 		return;
3046 	}
3047 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3048 		notifhdr_len = sizeof(struct sctp_send_failed_event);
3049 	} else {
3050 		notifhdr_len = sizeof(struct sctp_send_failed);
3051 	}
3052 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3053 	if (m_notify == NULL) {
3054 		/* no space left */
3055 		return;
3056 	}
3057 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3058 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3059 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3060 		memset(ssfe, 0, notifhdr_len);
3061 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3062 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3063 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
3064 		ssfe->ssfe_error = error;
3065 		/* not exactly what the user sent in, but should be close :) */
3066 		ssfe->ssfe_info.snd_sid = sp->sid;
3067 		if (sp->some_taken) {
3068 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3069 		} else {
3070 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3071 		}
3072 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3073 		ssfe->ssfe_info.snd_context = sp->context;
3074 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3075 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3076 	} else {
3077 		ssf = mtod(m_notify, struct sctp_send_failed *);
3078 		memset(ssf, 0, notifhdr_len);
3079 		ssf->ssf_type = SCTP_SEND_FAILED;
3080 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3081 		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
3082 		ssf->ssf_error = error;
3083 		/* not exactly what the user sent in, but should be close :) */
3084 		ssf->ssf_info.sinfo_stream = sp->sid;
3085 		ssf->ssf_info.sinfo_ssn = 0;
3086 		if (sp->some_taken) {
3087 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3088 		} else {
3089 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3090 		}
3091 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3092 		ssf->ssf_info.sinfo_context = sp->context;
3093 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3094 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3095 	}
3096 	SCTP_BUF_NEXT(m_notify) = sp->data;
3097 
3098 	/* Steal off the mbuf */
3099 	sp->data = NULL;
3100 	/*
3101 	 * For this case, we check the actual socket buffer; since the assoc
3102 	 * is going away, we don't want to overfill the socket buffer for a
3103 	 * non-reader.
3104 	 */
3105 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3106 		sctp_m_freem(m_notify);
3107 		return;
3108 	}
3109 	/* append to socket */
3110 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3111 	    0, 0, stcb->asoc.context, 0, 0, 0,
3112 	    m_notify);
3113 	if (control == NULL) {
3114 		/* no memory */
3115 		sctp_m_freem(m_notify);
3116 		return;
3117 	}
3118 	control->length = SCTP_BUF_LEN(m_notify);
3119 	control->spec_flags = M_NOTIFICATION;
3120 	/* not that we need this */
3121 	control->tail_mbuf = m_notify;
3122 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3123 	    control,
3124 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3125 }
3126 
3127 
3128 
3129 static void
3130 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3131 {
3132 	struct mbuf *m_notify;
3133 	struct sctp_adaptation_event *sai;
3134 	struct sctp_queued_to_read *control;
3135 
3136 	if ((stcb == NULL) ||
3137 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3138 		/* event not enabled */
3139 		return;
3140 	}
3141 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaptation_event), 0, M_NOWAIT, 1, MT_DATA);
3142 	if (m_notify == NULL)
3143 		/* no space left */
3144 		return;
3145 	SCTP_BUF_LEN(m_notify) = 0;
3146 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3147 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3148 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3149 	sai->sai_flags = 0;
3150 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3151 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3152 	sai->sai_assoc_id = sctp_get_associd(stcb);
3153 
3154 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3155 	SCTP_BUF_NEXT(m_notify) = NULL;
3156 
3157 	/* append to socket */
3158 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3159 	    0, 0, stcb->asoc.context, 0, 0, 0,
3160 	    m_notify);
3161 	if (control == NULL) {
3162 		/* no memory */
3163 		sctp_m_freem(m_notify);
3164 		return;
3165 	}
3166 	control->length = SCTP_BUF_LEN(m_notify);
3167 	control->spec_flags = M_NOTIFICATION;
3168 	/* not that we need this */
3169 	control->tail_mbuf = m_notify;
3170 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3171 	    control,
3172 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3173 }
3174 
3175 /* This must always be called with the read-queue LOCKED in the INP */
3176 static void
3177 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3178     uint32_t val, int so_locked
3179 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3180     SCTP_UNUSED
3181 #endif
3182 )
3183 {
3184 	struct mbuf *m_notify;
3185 	struct sctp_pdapi_event *pdapi;
3186 	struct sctp_queued_to_read *control;
3187 	struct sockbuf *sb;
3188 
3189 	if ((stcb == NULL) ||
3190 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3191 		/* event not enabled */
3192 		return;
3193 	}
3194 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3195 		return;
3196 	}
3197 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3198 	if (m_notify == NULL)
3199 		/* no space left */
3200 		return;
3201 	SCTP_BUF_LEN(m_notify) = 0;
3202 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3203 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3204 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3205 	pdapi->pdapi_flags = 0;
3206 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3207 	pdapi->pdapi_indication = error;
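	/* val packs the stream id (upper 16 bits) and sequence (lower 16). */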
3208 	pdapi->pdapi_stream = (val >> 16);
3209 	pdapi->pdapi_seq = (val & 0x0000ffff);
3210 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3211 
3212 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3213 	SCTP_BUF_NEXT(m_notify) = NULL;
3214 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3215 	    0, 0, stcb->asoc.context, 0, 0, 0,
3216 	    m_notify);
3217 	if (control == NULL) {
3218 		/* no memory */
3219 		sctp_m_freem(m_notify);
3220 		return;
3221 	}
3222 	control->length = SCTP_BUF_LEN(m_notify);
3223 	control->spec_flags = M_NOTIFICATION;
3224 	/* not that we need this */
3225 	control->tail_mbuf = m_notify;
3226 	sb = &stcb->sctp_socket->so_rcv;
3227 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3228 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3229 	}
3230 	sctp_sballoc(stcb, sb, m_notify);
3231 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3232 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3233 	}
3234 	control->end_added = 1;
3235 	if (stcb->asoc.control_pdapi)
3236 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3237 	else {
3238 		/* we really should not see this case */
3239 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3240 	}
3241 	if (stcb->sctp_ep && stcb->sctp_socket) {
3242 		/* This should always be the case */
3243 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3244 		struct socket *so;
3245 
3246 		so = SCTP_INP_SO(stcb->sctp_ep);
3247 		if (!so_locked) {
3248 			atomic_add_int(&stcb->asoc.refcnt, 1);
3249 			SCTP_TCB_UNLOCK(stcb);
3250 			SCTP_SOCKET_LOCK(so, 1);
3251 			SCTP_TCB_LOCK(stcb);
3252 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3253 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3254 				SCTP_SOCKET_UNLOCK(so, 1);
3255 				return;
3256 			}
3257 		}
3258 #endif
3259 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3260 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3261 		if (!so_locked) {
3262 			SCTP_SOCKET_UNLOCK(so, 1);
3263 		}
3264 #endif
3265 	}
3266 }
3267 
3268 static void
3269 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3270 {
3271 	struct mbuf *m_notify;
3272 	struct sctp_shutdown_event *sse;
3273 	struct sctp_queued_to_read *control;
3274 
3275 	/*
3276 	 * For the TCP model AND UDP connected sockets, we will send an error
3277 	 * up when a SHUTDOWN completes.
3278 	 */
3279 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3280 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3281 		/* mark socket closed for read/write and wakeup! */
3282 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3283 		struct socket *so;
3284 
3285 		so = SCTP_INP_SO(stcb->sctp_ep);
3286 		atomic_add_int(&stcb->asoc.refcnt, 1);
3287 		SCTP_TCB_UNLOCK(stcb);
3288 		SCTP_SOCKET_LOCK(so, 1);
3289 		SCTP_TCB_LOCK(stcb);
3290 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3291 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3292 			SCTP_SOCKET_UNLOCK(so, 1);
3293 			return;
3294 		}
3295 #endif
3296 		socantsendmore(stcb->sctp_socket);
3297 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3298 		SCTP_SOCKET_UNLOCK(so, 1);
3299 #endif
3300 	}
3301 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3302 		/* event not enabled */
3303 		return;
3304 	}
3305 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3306 	if (m_notify == NULL)
3307 		/* no space left */
3308 		return;
3309 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3310 	memset(sse, 0, sizeof(struct sctp_shutdown_event));
3311 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3312 	sse->sse_flags = 0;
3313 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3314 	sse->sse_assoc_id = sctp_get_associd(stcb);
3315 
3316 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3317 	SCTP_BUF_NEXT(m_notify) = NULL;
3318 
3319 	/* append to socket */
3320 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3321 	    0, 0, stcb->asoc.context, 0, 0, 0,
3322 	    m_notify);
3323 	if (control == NULL) {
3324 		/* no memory */
3325 		sctp_m_freem(m_notify);
3326 		return;
3327 	}
3328 	control->length = SCTP_BUF_LEN(m_notify);
3329 	control->spec_flags = M_NOTIFICATION;
3330 	/* not that we need this */
3331 	control->tail_mbuf = m_notify;
3332 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3333 	    control,
3334 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3335 }
3336 
3337 static void
3338 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3339     int so_locked
3340 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3341     SCTP_UNUSED
3342 #endif
3343 )
3344 {
3345 	struct mbuf *m_notify;
3346 	struct sctp_sender_dry_event *event;
3347 	struct sctp_queued_to_read *control;
3348 
3349 	if ((stcb == NULL) ||
3350 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3351 		/* event not enabled */
3352 		return;
3353 	}
3354 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3355 	if (m_notify == NULL) {
3356 		/* no space left */
3357 		return;
3358 	}
3359 	SCTP_BUF_LEN(m_notify) = 0;
3360 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3361 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3362 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3363 	event->sender_dry_flags = 0;
3364 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3365 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3366 
3367 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3368 	SCTP_BUF_NEXT(m_notify) = NULL;
3369 
3370 	/* append to socket */
3371 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3372 	    0, 0, stcb->asoc.context, 0, 0, 0,
3373 	    m_notify);
3374 	if (control == NULL) {
3375 		/* no memory */
3376 		sctp_m_freem(m_notify);
3377 		return;
3378 	}
3379 	control->length = SCTP_BUF_LEN(m_notify);
3380 	control->spec_flags = M_NOTIFICATION;
3381 	/* not that we need this */
3382 	control->tail_mbuf = m_notify;
3383 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3384 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3385 }
3386 
3387 
3388 void
3389 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3390 {
3391 	struct mbuf *m_notify;
3392 	struct sctp_queued_to_read *control;
3393 	struct sctp_stream_change_event *stradd;
3394 
3395 	if ((stcb == NULL) ||
3396 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3397 		/* event not enabled */
3398 		return;
3399 	}
3400 	if ((stcb->asoc.peer_req_out) && flag) {
3401 		/* Peer made the request, don't tell the local user */
3402 		stcb->asoc.peer_req_out = 0;
3403 		return;
3404 	}
3405 	stcb->asoc.peer_req_out = 0;
3406 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3407 	if (m_notify == NULL)
3408 		/* no space left */
3409 		return;
3410 	SCTP_BUF_LEN(m_notify) = 0;
3411 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3412 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3413 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3414 	stradd->strchange_flags = flag;
3415 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3416 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3417 	stradd->strchange_instrms = numberin;
3418 	stradd->strchange_outstrms = numberout;
3419 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3420 	SCTP_BUF_NEXT(m_notify) = NULL;
3421 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3422 		/* no space */
3423 		sctp_m_freem(m_notify);
3424 		return;
3425 	}
3426 	/* append to socket */
3427 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3428 	    0, 0, stcb->asoc.context, 0, 0, 0,
3429 	    m_notify);
3430 	if (control == NULL) {
3431 		/* no memory */
3432 		sctp_m_freem(m_notify);
3433 		return;
3434 	}
3435 	control->length = SCTP_BUF_LEN(m_notify);
3436 	control->spec_flags = M_NOTIFICATION;
3437 	/* not that we need this */
3438 	control->tail_mbuf = m_notify;
3439 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3440 	    control,
3441 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3442 }
3443 
3444 void
3445 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3446 {
3447 	struct mbuf *m_notify;
3448 	struct sctp_queued_to_read *control;
3449 	struct sctp_assoc_reset_event *strasoc;
3450 
3451 	if ((stcb == NULL) ||
3452 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3453 		/* event not enabled */
3454 		return;
3455 	}
3456 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3457 	if (m_notify == NULL)
3458 		/* no space left */
3459 		return;
3460 	SCTP_BUF_LEN(m_notify) = 0;
3461 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3462 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3463 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3464 	strasoc->assocreset_flags = flag;
3465 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3466 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3467 	strasoc->assocreset_local_tsn = sending_tsn;
3468 	strasoc->assocreset_remote_tsn = recv_tsn;
3469 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3470 	SCTP_BUF_NEXT(m_notify) = NULL;
3471 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3472 		/* no space */
3473 		sctp_m_freem(m_notify);
3474 		return;
3475 	}
3476 	/* append to socket */
3477 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3478 	    0, 0, stcb->asoc.context, 0, 0, 0,
3479 	    m_notify);
3480 	if (control == NULL) {
3481 		/* no memory */
3482 		sctp_m_freem(m_notify);
3483 		return;
3484 	}
3485 	control->length = SCTP_BUF_LEN(m_notify);
3486 	control->spec_flags = M_NOTIFICATION;
3487 	/* not that we need this */
3488 	control->tail_mbuf = m_notify;
3489 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3490 	    control,
3491 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3492 }
3493 
3494 
3495 
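/*
 * Queue an SCTP_STREAM_RESET_EVENT notification listing the affected stream
 * identifiers (converted to host byte order), if stream reset events are
 * enabled on the endpoint.
 */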
3496 static void
3497 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3498     int number_entries, uint16_t *list, int flag)
3499 {
3500 	struct mbuf *m_notify;
3501 	struct sctp_queued_to_read *control;
3502 	struct sctp_stream_reset_event *strreset;
3503 	int len;
3504 
3505 	if ((stcb == NULL) ||
3506 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3507 		/* event not enabled */
3508 		return;
3509 	}
3510 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3511 	if (m_notify == NULL)
3512 		/* no space left */
3513 		return;
3514 	SCTP_BUF_LEN(m_notify) = 0;
3515 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3516 	if (len > M_TRAILINGSPACE(m_notify)) {
3517 		/* never enough room */
3518 		sctp_m_freem(m_notify);
3519 		return;
3520 	}
3521 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3522 	memset(strreset, 0, len);
3523 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3524 	strreset->strreset_flags = flag;
3525 	strreset->strreset_length = len;
3526 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3527 	if (number_entries) {
3528 		int i;
3529 
3530 		for (i = 0; i < number_entries; i++) {
3531 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3532 		}
3533 	}
3534 	SCTP_BUF_LEN(m_notify) = len;
3535 	SCTP_BUF_NEXT(m_notify) = NULL;
3536 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3537 		/* no space */
3538 		sctp_m_freem(m_notify);
3539 		return;
3540 	}
3541 	/* append to socket */
3542 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3543 	    0, 0, stcb->asoc.context, 0, 0, 0,
3544 	    m_notify);
3545 	if (control == NULL) {
3546 		/* no memory */
3547 		sctp_m_freem(m_notify);
3548 		return;
3549 	}
3550 	control->length = SCTP_BUF_LEN(m_notify);
3551 	control->spec_flags = M_NOTIFICATION;
3552 	/* not that we need this */
3553 	control->tail_mbuf = m_notify;
3554 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3555 	    control,
3556 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3557 }
3558 
3559 
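/*
 * Queue an SCTP_REMOTE_ERROR notification for an ERROR chunk received from
 * the peer.  At most SCTP_CHUNK_BUFFER_SIZE bytes of the offending chunk are
 * copied into the notification.
 */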
3560 static void
3561 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3562 {
3563 	struct mbuf *m_notify;
3564 	struct sctp_remote_error *sre;
3565 	struct sctp_queued_to_read *control;
3566 	unsigned int notif_len;
3567 	uint16_t chunk_len;
3568 
3569 	if ((stcb == NULL) ||
3570 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3571 		return;
3572 	}
3573 	if (chunk != NULL) {
3574 		chunk_len = ntohs(chunk->ch.chunk_length);
3575 		/*
3576 		 * Only SCTP_CHUNK_BUFFER_SIZE bytes are guaranteed to be
3577 		 * contiguous.
3578 		 */
3579 		if (chunk_len > SCTP_CHUNK_BUFFER_SIZE) {
3580 			chunk_len = SCTP_CHUNK_BUFFER_SIZE;
3581 		}
3582 	} else {
3583 		chunk_len = 0;
3584 	}
3585 	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
3586 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3587 	if (m_notify == NULL) {
3588 		/* Retry with smaller value. */
3589 		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
3590 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3591 		if (m_notify == NULL) {
3592 			return;
3593 		}
3594 	}
3595 	SCTP_BUF_NEXT(m_notify) = NULL;
3596 	sre = mtod(m_notify, struct sctp_remote_error *);
3597 	memset(sre, 0, notif_len);
3598 	sre->sre_type = SCTP_REMOTE_ERROR;
3599 	sre->sre_flags = 0;
3600 	sre->sre_length = sizeof(struct sctp_remote_error);
3601 	sre->sre_error = error;
3602 	sre->sre_assoc_id = sctp_get_associd(stcb);
3603 	if (notif_len > sizeof(struct sctp_remote_error)) {
3604 		memcpy(sre->sre_data, chunk, chunk_len);
3605 		sre->sre_length += chunk_len;
3606 	}
3607 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3608 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3609 	    0, 0, stcb->asoc.context, 0, 0, 0,
3610 	    m_notify);
3611 	if (control != NULL) {
3612 		control->length = SCTP_BUF_LEN(m_notify);
3613 		control->spec_flags = M_NOTIFICATION;
3614 		/* not that we need this */
3615 		control->tail_mbuf = m_notify;
3616 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3617 		    control,
3618 		    &stcb->sctp_socket->so_rcv, 1,
3619 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3620 	} else {
3621 		sctp_m_freem(m_notify);
3622 	}
3623 }
3624 
3625 
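/*
 * Central dispatcher for notifications to the ULP: maps a SCTP_NOTIFY_*
 * constant onto the specific notification routine above.  Nothing is
 * reported once the socket is gone or can no longer receive, and interface
 * notifications are suppressed while the association is still in the
 * COOKIE_WAIT or COOKIE_ECHOED state.
 */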
3626 void
3627 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3628     uint32_t error, void *data, int so_locked
3629 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3630     SCTP_UNUSED
3631 #endif
3632 )
3633 {
3634 	if ((stcb == NULL) ||
3635 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3636 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3637 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3638 		/* If the socket is gone we are out of here */
3639 		return;
3640 	}
3641 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3642 		return;
3643 	}
3644 	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3645 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3646 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3647 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3648 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3649 			/* Don't report these in front states */
3650 			return;
3651 		}
3652 	}
3653 	switch (notification) {
3654 	case SCTP_NOTIFY_ASSOC_UP:
3655 		if (stcb->asoc.assoc_up_sent == 0) {
3656 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3657 			stcb->asoc.assoc_up_sent = 1;
3658 		}
3659 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3660 			sctp_notify_adaptation_layer(stcb);
3661 		}
3662 		if (stcb->asoc.auth_supported == 0) {
3663 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3664 			    NULL, so_locked);
3665 		}
3666 		break;
3667 	case SCTP_NOTIFY_ASSOC_DOWN:
3668 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3669 		break;
3670 	case SCTP_NOTIFY_INTERFACE_DOWN:
3671 		{
3672 			struct sctp_nets *net;
3673 
3674 			net = (struct sctp_nets *)data;
3675 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3676 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3677 			break;
3678 		}
3679 	case SCTP_NOTIFY_INTERFACE_UP:
3680 		{
3681 			struct sctp_nets *net;
3682 
3683 			net = (struct sctp_nets *)data;
3684 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3685 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3686 			break;
3687 		}
3688 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3689 		{
3690 			struct sctp_nets *net;
3691 
3692 			net = (struct sctp_nets *)data;
3693 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3694 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3695 			break;
3696 		}
3697 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3698 		sctp_notify_send_failed2(stcb, error,
3699 		    (struct sctp_stream_queue_pending *)data, so_locked);
3700 		break;
3701 	case SCTP_NOTIFY_SENT_DG_FAIL:
3702 		sctp_notify_send_failed(stcb, 1, error,
3703 		    (struct sctp_tmit_chunk *)data, so_locked);
3704 		break;
3705 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3706 		sctp_notify_send_failed(stcb, 0, error,
3707 		    (struct sctp_tmit_chunk *)data, so_locked);
3708 		break;
3709 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3710 		{
3711 			uint32_t val;
3712 
3713 			val = *((uint32_t *)data);
3714 
3715 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3716 			break;
3717 		}
3718 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3719 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3720 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3721 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3722 		} else {
3723 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3724 		}
3725 		break;
3726 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3727 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3728 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3729 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3730 		} else {
3731 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3732 		}
3733 		break;
3734 	case SCTP_NOTIFY_ASSOC_RESTART:
3735 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3736 		if (stcb->asoc.auth_supported == 0) {
3737 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3738 			    NULL, so_locked);
3739 		}
3740 		break;
3741 	case SCTP_NOTIFY_STR_RESET_SEND:
3742 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
3743 		break;
3744 	case SCTP_NOTIFY_STR_RESET_RECV:
3745 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
3746 		break;
3747 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3748 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3749 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3750 		break;
3751 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3752 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3753 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3754 		break;
3755 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3756 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3757 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3758 		break;
3759 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3760 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3761 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3762 		break;
3763 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3764 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3765 		    error, so_locked);
3766 		break;
3767 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3768 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3769 		    error, so_locked);
3770 		break;
3771 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3772 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3773 		    error, so_locked);
3774 		break;
3775 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3776 		sctp_notify_shutdown_event(stcb);
3777 		break;
3778 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3779 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3780 		    (uint16_t)(uintptr_t)data,
3781 		    so_locked);
3782 		break;
3783 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3784 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3785 		    (uint16_t)(uintptr_t)data,
3786 		    so_locked);
3787 		break;
3788 	case SCTP_NOTIFY_NO_PEER_AUTH:
3789 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3790 		    (uint16_t)(uintptr_t)data,
3791 		    so_locked);
3792 		break;
3793 	case SCTP_NOTIFY_SENDER_DRY:
3794 		sctp_notify_sender_dry_event(stcb, so_locked);
3795 		break;
3796 	case SCTP_NOTIFY_REMOTE_ERROR:
3797 		sctp_notify_remote_error(stcb, error, data);
3798 		break;
3799 	default:
3800 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3801 		    __func__, notification, notification);
3802 		break;
3803 	}			/* end switch */
3804 }
3805 
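/*
 * Report and free all outbound data still queued for the association: the
 * sent queue, the send queue, and every stream's output queue.  Each chunk
 * is reported to the ULP as a sent or unsent datagram failure before its
 * mbufs are released.
 */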
3806 void
3807 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
3808 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3809     SCTP_UNUSED
3810 #endif
3811 )
3812 {
3813 	struct sctp_association *asoc;
3814 	struct sctp_stream_out *outs;
3815 	struct sctp_tmit_chunk *chk, *nchk;
3816 	struct sctp_stream_queue_pending *sp, *nsp;
3817 	int i;
3818 
3819 	if (stcb == NULL) {
3820 		return;
3821 	}
3822 	asoc = &stcb->asoc;
3823 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3824 		/* already being freed */
3825 		return;
3826 	}
3827 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3828 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3829 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
3830 		return;
3831 	}
3832 	/* now go through all the gunk, freeing chunks */
3833 	if (holds_lock == 0) {
3834 		SCTP_TCB_SEND_LOCK(stcb);
3835 	}
3836 	/* sent queue SHOULD be empty */
3837 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
3838 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3839 		asoc->sent_queue_cnt--;
3840 		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
3841 			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
3842 				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
3843 #ifdef INVARIANTS
3844 			} else {
3845 				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
3846 #endif
3847 			}
3848 		}
3849 		if (chk->data != NULL) {
3850 			sctp_free_bufspace(stcb, asoc, chk, 1);
3851 			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
3852 			    error, chk, so_locked);
3853 			if (chk->data) {
3854 				sctp_m_freem(chk->data);
3855 				chk->data = NULL;
3856 			}
3857 		}
3858 		sctp_free_a_chunk(stcb, chk, so_locked);
3859 		/* sa_ignore FREED_MEMORY */
3860 	}
3861 	/* pending send queue SHOULD be empty */
3862 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
3863 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3864 		asoc->send_queue_cnt--;
3865 		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
3866 			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
3867 #ifdef INVARIANTS
3868 		} else {
3869 			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
3870 #endif
3871 		}
3872 		if (chk->data != NULL) {
3873 			sctp_free_bufspace(stcb, asoc, chk, 1);
3874 			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
3875 			    error, chk, so_locked);
3876 			if (chk->data) {
3877 				sctp_m_freem(chk->data);
3878 				chk->data = NULL;
3879 			}
3880 		}
3881 		sctp_free_a_chunk(stcb, chk, so_locked);
3882 		/* sa_ignore FREED_MEMORY */
3883 	}
3884 	for (i = 0; i < asoc->streamoutcnt; i++) {
3885 		/* For each stream */
3886 		outs = &asoc->strmout[i];
3887 		/* clean up any sends there */
3888 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
3889 			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
3890 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3891 			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, holds_lock);
3892 			sctp_free_spbufspace(stcb, asoc, sp);
3893 			if (sp->data) {
3894 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3895 				    error, (void *)sp, so_locked);
3896 				if (sp->data) {
3897 					sctp_m_freem(sp->data);
3898 					sp->data = NULL;
3899 					sp->tail_mbuf = NULL;
3900 					sp->length = 0;
3901 				}
3902 			}
3903 			if (sp->net) {
3904 				sctp_free_remote_addr(sp->net);
3905 				sp->net = NULL;
3906 			}
3907 			/* Free the chunk */
3908 			sctp_free_a_strmoq(stcb, sp, so_locked);
3909 			/* sa_ignore FREED_MEMORY */
3910 		}
3911 	}
3912 
3913 	if (holds_lock == 0) {
3914 		SCTP_TCB_SEND_UNLOCK(stcb);
3915 	}
3916 }
3917 
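/*
 * Notify the ULP that the association was aborted, either by the peer
 * (from_peer != 0) or locally, after reporting all outbound data as failed.
 */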
3918 void
3919 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3920     struct sctp_abort_chunk *abort, int so_locked
3921 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3922     SCTP_UNUSED
3923 #endif
3924 )
3925 {
3926 	if (stcb == NULL) {
3927 		return;
3928 	}
3929 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3930 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3931 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3932 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3933 	}
3934 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3935 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3936 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3937 		return;
3938 	}
3939 	/* Tell them we lost the asoc */
3940 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3941 	if (from_peer) {
3942 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3943 	} else {
3944 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3945 	}
3946 }
3947 
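/*
 * Send an ABORT in response to an incoming packet and, if a TCB exists,
 * notify the ULP and free the association.
 */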
3948 void
3949 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3950     struct mbuf *m, int iphlen,
3951     struct sockaddr *src, struct sockaddr *dst,
3952     struct sctphdr *sh, struct mbuf *op_err,
3953     uint8_t mflowtype, uint32_t mflowid,
3954     uint32_t vrf_id, uint16_t port)
3955 {
3956 	uint32_t vtag;
3957 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3958 	struct socket *so;
3959 #endif
3960 
3961 	vtag = 0;
3962 	if (stcb != NULL) {
3963 		vtag = stcb->asoc.peer_vtag;
3964 		vrf_id = stcb->asoc.vrf_id;
3965 	}
3966 	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
3967 	    mflowtype, mflowid, inp->fibnum,
3968 	    vrf_id, port);
3969 	if (stcb != NULL) {
3970 		/* We have a TCB to abort, send notification too */
3971 		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
3972 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3973 		/* Ok, now lets free it */
3974 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3975 		so = SCTP_INP_SO(inp);
3976 		atomic_add_int(&stcb->asoc.refcnt, 1);
3977 		SCTP_TCB_UNLOCK(stcb);
3978 		SCTP_SOCKET_LOCK(so, 1);
3979 		SCTP_TCB_LOCK(stcb);
3980 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3981 #endif
3982 		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
3983 		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
3984 		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3985 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3986 		}
3987 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
3988 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
3989 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3990 		SCTP_SOCKET_UNLOCK(so, 1);
3991 #endif
3992 	}
3993 }
3994 #ifdef SCTP_ASOCLOG_OF_TSNS
3995 void
3996 sctp_print_out_track_log(struct sctp_tcb *stcb)
3997 {
3998 #ifdef NOISY_PRINTS
3999 	int i;
4000 
4001 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
4002 	SCTP_PRINTF("IN bound TSN log-aaa\n");
4003 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
4004 		SCTP_PRINTF("None rcvd\n");
4005 		goto none_in;
4006 	}
4007 	if (stcb->asoc.tsn_in_wrapped) {
4008 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
4009 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4010 			    stcb->asoc.in_tsnlog[i].tsn,
4011 			    stcb->asoc.in_tsnlog[i].strm,
4012 			    stcb->asoc.in_tsnlog[i].seq,
4013 			    stcb->asoc.in_tsnlog[i].flgs,
4014 			    stcb->asoc.in_tsnlog[i].sz);
4015 		}
4016 	}
4017 	if (stcb->asoc.tsn_in_at) {
4018 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4019 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4020 			    stcb->asoc.in_tsnlog[i].tsn,
4021 			    stcb->asoc.in_tsnlog[i].strm,
4022 			    stcb->asoc.in_tsnlog[i].seq,
4023 			    stcb->asoc.in_tsnlog[i].flgs,
4024 			    stcb->asoc.in_tsnlog[i].sz);
4025 		}
4026 	}
4027 none_in:
4028 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
4029 	if ((stcb->asoc.tsn_out_at == 0) &&
4030 	    (stcb->asoc.tsn_out_wrapped == 0)) {
4031 		SCTP_PRINTF("None sent\n");
4032 	}
4033 	if (stcb->asoc.tsn_out_wrapped) {
4034 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4035 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4036 			    stcb->asoc.out_tsnlog[i].tsn,
4037 			    stcb->asoc.out_tsnlog[i].strm,
4038 			    stcb->asoc.out_tsnlog[i].seq,
4039 			    stcb->asoc.out_tsnlog[i].flgs,
4040 			    stcb->asoc.out_tsnlog[i].sz);
4041 		}
4042 	}
4043 	if (stcb->asoc.tsn_out_at) {
4044 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4045 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4046 			    stcb->asoc.out_tsnlog[i].tsn,
4047 			    stcb->asoc.out_tsnlog[i].strm,
4048 			    stcb->asoc.out_tsnlog[i].seq,
4049 			    stcb->asoc.out_tsnlog[i].flgs,
4050 			    stcb->asoc.out_tsnlog[i].sz);
4051 		}
4052 	}
4053 #endif
4054 }
4055 #endif
4056 
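/*
 * Abort an existing association: send an ABORT to the peer, notify the ULP
 * (unless the socket is already gone), and free the association.
 */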
4057 void
4058 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4059     struct mbuf *op_err,
4060     int so_locked
4061 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4062     SCTP_UNUSED
4063 #endif
4064 )
4065 {
4066 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4067 	struct socket *so;
4068 #endif
4069 
4070 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4071 	so = SCTP_INP_SO(inp);
4072 #endif
4073 	if (stcb == NULL) {
4074 		/* Got to have a TCB */
4075 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4076 			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4077 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4078 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
4079 			}
4080 		}
4081 		return;
4082 	} else {
4083 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4084 	}
4085 	/* notify the peer */
4086 	sctp_send_abort_tcb(stcb, op_err, so_locked);
4087 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4088 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4089 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4090 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4091 	}
4092 	/* notify the ulp */
4093 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4094 		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4095 	}
4096 	/* now free the asoc */
4097 #ifdef SCTP_ASOCLOG_OF_TSNS
4098 	sctp_print_out_track_log(stcb);
4099 #endif
4100 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4101 	if (!so_locked) {
4102 		atomic_add_int(&stcb->asoc.refcnt, 1);
4103 		SCTP_TCB_UNLOCK(stcb);
4104 		SCTP_SOCKET_LOCK(so, 1);
4105 		SCTP_TCB_LOCK(stcb);
4106 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4107 	}
4108 #endif
4109 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4110 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4111 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4112 	if (!so_locked) {
4113 		SCTP_SOCKET_UNLOCK(so, 1);
4114 	}
4115 #endif
4116 }
4117 
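/*
 * Handle an "out of the blue" packet, i.e. one that matches no existing
 * association (see RFC 4960, section 8.4).  No response is sent for packets
 * containing a PACKET-DROPPED, ABORT, or SHUTDOWN-COMPLETE chunk; a
 * SHUTDOWN-ACK is answered with a SHUTDOWN-COMPLETE; everything else is
 * answered with an ABORT, subject to the sctp_blackhole sysctl.
 */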
4118 void
4119 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4120     struct sockaddr *src, struct sockaddr *dst,
4121     struct sctphdr *sh, struct sctp_inpcb *inp,
4122     struct mbuf *cause,
4123     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4124     uint32_t vrf_id, uint16_t port)
4125 {
4126 	struct sctp_chunkhdr *ch, chunk_buf;
4127 	unsigned int chk_length;
4128 	int contains_init_chunk;
4129 
4130 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4131 	/* Generate a TO address for future reference */
4132 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4133 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4134 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4135 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4136 		}
4137 	}
4138 	contains_init_chunk = 0;
4139 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4140 	    sizeof(*ch), (uint8_t *)&chunk_buf);
4141 	while (ch != NULL) {
4142 		chk_length = ntohs(ch->chunk_length);
4143 		if (chk_length < sizeof(*ch)) {
4144 			/* break to abort land */
4145 			break;
4146 		}
4147 		switch (ch->chunk_type) {
4148 		case SCTP_INIT:
4149 			contains_init_chunk = 1;
4150 			break;
4151 		case SCTP_PACKET_DROPPED:
4152 			/* we don't respond to pkt-dropped */
4153 			return;
4154 		case SCTP_ABORT_ASSOCIATION:
4155 			/* we don't respond with an ABORT to an ABORT */
4156 			return;
4157 		case SCTP_SHUTDOWN_COMPLETE:
4158 			/*
4159 			 * we ignore it since we are not waiting for it and
4160 			 * peer is gone
4161 			 */
4162 			return;
4163 		case SCTP_SHUTDOWN_ACK:
4164 			sctp_send_shutdown_complete2(src, dst, sh,
4165 			    mflowtype, mflowid, fibnum,
4166 			    vrf_id, port);
4167 			return;
4168 		default:
4169 			break;
4170 		}
4171 		offset += SCTP_SIZE32(chk_length);
4172 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4173 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4174 	}
4175 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4176 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4177 	    (contains_init_chunk == 0))) {
4178 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4179 		    mflowtype, mflowid, fibnum,
4180 		    vrf_id, port);
4181 	}
4182 }
4183 
4184 /*
4185  * Check the inbound datagram to make sure there is not an ABORT chunk
4186  * inside it; if there is, return 1, else return 0.
4187  */
4188 int
4189 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
4190 {
4191 	struct sctp_chunkhdr *ch;
4192 	struct sctp_init_chunk *init_chk, chunk_buf;
4193 	int offset;
4194 	unsigned int chk_length;
4195 
4196 	offset = iphlen + sizeof(struct sctphdr);
4197 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4198 	    (uint8_t *)&chunk_buf);
4199 	while (ch != NULL) {
4200 		chk_length = ntohs(ch->chunk_length);
4201 		if (chk_length < sizeof(*ch)) {
4202 			/* packet is probably corrupt */
4203 			break;
4204 		}
4205 		/* we seem to be ok, is it an abort? */
4206 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4207 			/* yep, tell them */
4208 			return (1);
4209 		}
4210 		if (ch->chunk_type == SCTP_INITIATION) {
4211 			/* need to update the Vtag */
4212 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4213 			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
4214 			if (init_chk != NULL) {
4215 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4216 			}
4217 		}
4218 		/* Nope, move to the next chunk */
4219 		offset += SCTP_SIZE32(chk_length);
4220 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4221 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4222 	}
4223 	return (0);
4224 }
4225 
4226 /*
4227  * currently (2/02), ifa_addr embeds scope_id's and doesn't have sin6_scope_id
4228  * set (i.e. it's 0), so create this function to compare link-local scopes
4229  */
4230 #ifdef INET6
4231 uint32_t
4232 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4233 {
4234 	struct sockaddr_in6 a, b;
4235 
4236 	/* save copies */
4237 	a = *addr1;
4238 	b = *addr2;
4239 
4240 	if (a.sin6_scope_id == 0)
4241 		if (sa6_recoverscope(&a)) {
4242 			/* can't get scope, so can't match */
4243 			return (0);
4244 		}
4245 	if (b.sin6_scope_id == 0)
4246 		if (sa6_recoverscope(&b)) {
4247 			/* can't get scope, so can't match */
4248 			return (0);
4249 		}
4250 	if (a.sin6_scope_id != b.sin6_scope_id)
4251 		return (0);
4252 
4253 	return (1);
4254 }
4255 
4256 /*
4257  * returns a sockaddr_in6 with embedded scope recovered and removed
4258  */
4259 struct sockaddr_in6 *
4260 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4261 {
4262 	/* check and strip embedded scope junk */
4263 	if (addr->sin6_family == AF_INET6) {
4264 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4265 			if (addr->sin6_scope_id == 0) {
4266 				*store = *addr;
4267 				if (!sa6_recoverscope(store)) {
4268 					/* use the recovered scope */
4269 					addr = store;
4270 				}
4271 			} else {
4272 				/* else, return the original "to" addr */
4273 				in6_clearscope(&addr->sin6_addr);
4274 			}
4275 		}
4276 	}
4277 	return (addr);
4278 }
4279 #endif
4280 
4281 /*
4282  * are the two addresses the same?  currently a "scopeless" check returns: 1
4283  * Are the two addresses the same?  Currently a "scopeless" check.  Returns 1
4284  * if same, 0 if not.
4285 int
4286 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4287 {
4288 
4289 	/* must be valid */
4290 	if (sa1 == NULL || sa2 == NULL)
4291 		return (0);
4292 
4293 	/* must be the same family */
4294 	if (sa1->sa_family != sa2->sa_family)
4295 		return (0);
4296 
4297 	switch (sa1->sa_family) {
4298 #ifdef INET6
4299 	case AF_INET6:
4300 		{
4301 			/* IPv6 addresses */
4302 			struct sockaddr_in6 *sin6_1, *sin6_2;
4303 
4304 			sin6_1 = (struct sockaddr_in6 *)sa1;
4305 			sin6_2 = (struct sockaddr_in6 *)sa2;
4306 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4307 			    sin6_2));
4308 		}
4309 #endif
4310 #ifdef INET
4311 	case AF_INET:
4312 		{
4313 			/* IPv4 addresses */
4314 			struct sockaddr_in *sin_1, *sin_2;
4315 
4316 			sin_1 = (struct sockaddr_in *)sa1;
4317 			sin_2 = (struct sockaddr_in *)sa2;
4318 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4319 		}
4320 #endif
4321 	default:
4322 		/* we don't do these... */
4323 		return (0);
4324 	}
4325 }
4326 
4327 void
4328 sctp_print_address(struct sockaddr *sa)
4329 {
4330 #ifdef INET6
4331 	char ip6buf[INET6_ADDRSTRLEN];
4332 #endif
4333 
4334 	switch (sa->sa_family) {
4335 #ifdef INET6
4336 	case AF_INET6:
4337 		{
4338 			struct sockaddr_in6 *sin6;
4339 
4340 			sin6 = (struct sockaddr_in6 *)sa;
4341 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4342 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4343 			    ntohs(sin6->sin6_port),
4344 			    sin6->sin6_scope_id);
4345 			break;
4346 		}
4347 #endif
4348 #ifdef INET
4349 	case AF_INET:
4350 		{
4351 			struct sockaddr_in *sin;
4352 			unsigned char *p;
4353 
4354 			sin = (struct sockaddr_in *)sa;
4355 			p = (unsigned char *)&sin->sin_addr;
4356 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4357 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4358 			break;
4359 		}
4360 #endif
4361 	default:
4362 		SCTP_PRINTF("?\n");
4363 		break;
4364 	}
4365 }
4366 
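/*
 * Move all read queue entries belonging to stcb from the old endpoint to the
 * new one (used when an association is peeled off or accepted onto its own
 * socket), transferring the socket buffer accounting from the old so_rcv to
 * the new one.
 */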
4367 void
4368 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4369     struct sctp_inpcb *new_inp,
4370     struct sctp_tcb *stcb,
4371     int waitflags)
4372 {
4373 	/*
4374 	 * go through our old INP and pull off any control structures that
4375 	 * belong to stcb and move then to the new inp.
4376 	 * belong to stcb and move them to the new inp.
4377 	struct socket *old_so, *new_so;
4378 	struct sctp_queued_to_read *control, *nctl;
4379 	struct sctp_readhead tmp_queue;
4380 	struct mbuf *m;
4381 	int error = 0;
4382 
4383 	old_so = old_inp->sctp_socket;
4384 	new_so = new_inp->sctp_socket;
4385 	TAILQ_INIT(&tmp_queue);
4386 	error = sblock(&old_so->so_rcv, waitflags);
4387 	if (error) {
4388 		/*
4389 		 * Gak, can't get sblock, we have a problem. Data will be
4390 		 * left stranded.. and we don't dare look at it since the
4391 		 * other thread may be reading something. Oh well, it's a
4392 		 * screwed up app that does a peeloff OR an accept while
4393 		 * reading from the main socket... actually it's only the
4394 		 * peeloff() case, since I think read will fail on a
4395 		 * listening socket..
4396 		 */
4397 		return;
4398 	}
4399 	/* lock the socket buffers */
4400 	SCTP_INP_READ_LOCK(old_inp);
4401 	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4402 		/* Pull off all for our target stcb */
4403 		if (control->stcb == stcb) {
4404 			/* remove it we want it */
4405 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4406 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4407 			m = control->data;
4408 			while (m) {
4409 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4410 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4411 				}
4412 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4413 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4414 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4415 				}
4416 				m = SCTP_BUF_NEXT(m);
4417 			}
4418 		}
4419 	}
4420 	SCTP_INP_READ_UNLOCK(old_inp);
4421 	/* Remove the sb-lock on the old socket */
4422 
4423 	sbunlock(&old_so->so_rcv);
4424 	/* Now we move them over to the new socket buffer */
4425 	SCTP_INP_READ_LOCK(new_inp);
4426 	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4427 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4428 		m = control->data;
4429 		while (m) {
4430 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4431 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4432 			}
4433 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4434 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4435 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4436 			}
4437 			m = SCTP_BUF_NEXT(m);
4438 		}
4439 	}
4440 	SCTP_INP_READ_UNLOCK(new_inp);
4441 }
4442 
4443 void
4444 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
4445     struct sctp_tcb *stcb,
4446     int so_locked
4447 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4448     SCTP_UNUSED
4449 #endif
4450 )
4451 {
4452 	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
4453 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4454 		struct socket *so;
4455 
4456 		so = SCTP_INP_SO(inp);
4457 		if (!so_locked) {
4458 			if (stcb) {
4459 				atomic_add_int(&stcb->asoc.refcnt, 1);
4460 				SCTP_TCB_UNLOCK(stcb);
4461 			}
4462 			SCTP_SOCKET_LOCK(so, 1);
4463 			if (stcb) {
4464 				SCTP_TCB_LOCK(stcb);
4465 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4466 			}
4467 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4468 				SCTP_SOCKET_UNLOCK(so, 1);
4469 				return;
4470 			}
4471 		}
4472 #endif
4473 		sctp_sorwakeup(inp, inp->sctp_socket);
4474 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4475 		if (!so_locked) {
4476 			SCTP_SOCKET_UNLOCK(so, 1);
4477 		}
4478 #endif
4479 	}
4480 }
4481 
4482 void
4483 sctp_add_to_readq(struct sctp_inpcb *inp,
4484     struct sctp_tcb *stcb,
4485     struct sctp_queued_to_read *control,
4486     struct sockbuf *sb,
4487     int end,
4488     int inp_read_lock_held,
4489     int so_locked
4490 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4491     SCTP_UNUSED
4492 #endif
4493 )
4494 {
4495 	/*
4496 	 * Here we must place the control on the end of the socket read
4497 	 * queue AND increment sb_cc so that select will work properly on
4498 	 * read.
4499 	 */
4500 	struct mbuf *m, *prev = NULL;
4501 
4502 	if (inp == NULL) {
4503 		/* Gak, TSNH!! */
4504 #ifdef INVARIANTS
4505 		panic("Gak, inp NULL on add_to_readq");
4506 #endif
4507 		return;
4508 	}
4509 	if (inp_read_lock_held == 0)
4510 		SCTP_INP_READ_LOCK(inp);
4511 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4512 		sctp_free_remote_addr(control->whoFrom);
4513 		if (control->data) {
4514 			sctp_m_freem(control->data);
4515 			control->data = NULL;
4516 		}
4517 		sctp_free_a_readq(stcb, control);
4518 		if (inp_read_lock_held == 0)
4519 			SCTP_INP_READ_UNLOCK(inp);
4520 		return;
4521 	}
4522 	if (!(control->spec_flags & M_NOTIFICATION)) {
4523 		atomic_add_int(&inp->total_recvs, 1);
4524 		if (!control->do_not_ref_stcb) {
4525 			atomic_add_int(&stcb->total_recvs, 1);
4526 		}
4527 	}
4528 	m = control->data;
4529 	control->held_length = 0;
4530 	control->length = 0;
4531 	while (m) {
4532 		if (SCTP_BUF_LEN(m) == 0) {
4533 			/* Skip mbufs with NO length */
4534 			if (prev == NULL) {
4535 				/* First one */
4536 				control->data = sctp_m_free(m);
4537 				m = control->data;
4538 			} else {
4539 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4540 				m = SCTP_BUF_NEXT(prev);
4541 			}
4542 			if (m == NULL) {
4543 				control->tail_mbuf = prev;
4544 			}
4545 			continue;
4546 		}
4547 		prev = m;
4548 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4549 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4550 		}
4551 		sctp_sballoc(stcb, sb, m);
4552 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4553 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4554 		}
4555 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4556 		m = SCTP_BUF_NEXT(m);
4557 	}
4558 	if (prev != NULL) {
4559 		control->tail_mbuf = prev;
4560 	} else {
4561 		/* Everything got collapsed out?? */
4562 		sctp_free_remote_addr(control->whoFrom);
4563 		sctp_free_a_readq(stcb, control);
4564 		if (inp_read_lock_held == 0)
4565 			SCTP_INP_READ_UNLOCK(inp);
4566 		return;
4567 	}
4568 	if (end) {
4569 		control->end_added = 1;
4570 	}
4571 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4572 	control->on_read_q = 1;
4573 	if (inp_read_lock_held == 0)
4574 		SCTP_INP_READ_UNLOCK(inp);
4575 	if (inp && inp->sctp_socket) {
4576 		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
4577 	}
4578 }
4579 
4580 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4581  *************ALTERNATE ROUTING CODE
4582  */
4583 
4584 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4585  *************ALTERNATE ROUTING CODE
4586  */
4587 
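/*
 * Build an mbuf carrying a generic error cause: a cause header (code and
 * length) followed by the info string, not NUL terminated.  Returns NULL if
 * code is 0, info is NULL, or the string is too long to fit in a cause.
 *
 * Illustrative use (the message text is just an example), a pattern used
 * when constructing operation error causes:
 *
 *	char msg[SCTP_DIAG_INFO_LEN];
 *
 *	snprintf(msg, sizeof(msg), "Chunk too short");
 *	op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
 */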
4588 struct mbuf *
4589 sctp_generate_cause(uint16_t code, char *info)
4590 {
4591 	struct mbuf *m;
4592 	struct sctp_gen_error_cause *cause;
4593 	size_t info_len;
4594 	uint16_t len;
4595 
4596 	if ((code == 0) || (info == NULL)) {
4597 		return (NULL);
4598 	}
4599 	info_len = strlen(info);
4600 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4601 		return (NULL);
4602 	}
4603 	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
4604 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4605 	if (m != NULL) {
4606 		SCTP_BUF_LEN(m) = len;
4607 		cause = mtod(m, struct sctp_gen_error_cause *);
4608 		cause->code = htons(code);
4609 		cause->length = htons(len);
4610 		memcpy(cause->info, info, info_len);
4611 	}
4612 	return (m);
4613 }
4614 
4615 struct mbuf *
4616 sctp_generate_no_user_data_cause(uint32_t tsn)
4617 {
4618 	struct mbuf *m;
4619 	struct sctp_error_no_user_data *no_user_data_cause;
4620 	uint16_t len;
4621 
4622 	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
4623 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4624 	if (m != NULL) {
4625 		SCTP_BUF_LEN(m) = len;
4626 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4627 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4628 		no_user_data_cause->cause.length = htons(len);
4629 		no_user_data_cause->tsn = htonl(tsn);
4630 	}
4631 	return (m);
4632 }
4633 
4634 #ifdef SCTP_MBCNT_LOGGING
4635 void
4636 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4637     struct sctp_tmit_chunk *tp1, int chk_cnt)
4638 {
4639 	if (tp1->data == NULL) {
4640 		return;
4641 	}
4642 	asoc->chunks_on_out_queue -= chk_cnt;
4643 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4644 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4645 		    asoc->total_output_queue_size,
4646 		    tp1->book_size,
4647 		    0,
4648 		    tp1->mbcnt);
4649 	}
4650 	if (asoc->total_output_queue_size >= tp1->book_size) {
4651 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4652 	} else {
4653 		asoc->total_output_queue_size = 0;
4654 	}
4655 
4656 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4657 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4658 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4659 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4660 		} else {
4661 			stcb->sctp_socket->so_snd.sb_cc = 0;
4662 
4663 		}
4664 	}
4665 }
4666 
4667 #endif
4668 
4669 int
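/*
 * Abandon a PR-SCTP message: mark every fragment of the message identified
 * by tp1 (across the sent queue, the send queue and, if necessary, the
 * stream out queue) to be skipped by a FORWARD-TSN, update the abandoned
 * counters, and notify the ULP.  Returns the number of bytes released.
 */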
4670 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4671     uint8_t sent, int so_locked
4672 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4673     SCTP_UNUSED
4674 #endif
4675 )
4676 {
4677 	struct sctp_stream_out *strq;
4678 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4679 	struct sctp_stream_queue_pending *sp;
4680 	uint32_t mid;
4681 	uint16_t sid;
4682 	uint8_t foundeom = 0;
4683 	int ret_sz = 0;
4684 	int notdone;
4685 	int do_wakeup_routine = 0;
4686 
4687 	sid = tp1->rec.data.sid;
4688 	mid = tp1->rec.data.mid;
4689 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4690 		stcb->asoc.abandoned_sent[0]++;
4691 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4692 		stcb->asoc.strmout[sid].abandoned_sent[0]++;
4693 #if defined(SCTP_DETAILED_STR_STATS)
4694 		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4695 #endif
4696 	} else {
4697 		stcb->asoc.abandoned_unsent[0]++;
4698 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4699 		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
4700 #if defined(SCTP_DETAILED_STR_STATS)
4701 		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4702 #endif
4703 	}
4704 	do {
4705 		ret_sz += tp1->book_size;
4706 		if (tp1->data != NULL) {
4707 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4708 				sctp_flight_size_decrease(tp1);
4709 				sctp_total_flight_decrease(stcb, tp1);
4710 			}
4711 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4712 			stcb->asoc.peers_rwnd += tp1->send_size;
4713 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4714 			if (sent) {
4715 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4716 			} else {
4717 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4718 			}
4719 			if (tp1->data) {
4720 				sctp_m_freem(tp1->data);
4721 				tp1->data = NULL;
4722 			}
4723 			do_wakeup_routine = 1;
4724 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4725 				stcb->asoc.sent_queue_cnt_removeable--;
4726 			}
4727 		}
4728 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4729 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4730 		    SCTP_DATA_NOT_FRAG) {
4731 			/* not frag'ed, we are done */
4732 			notdone = 0;
4733 			foundeom = 1;
4734 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4735 			/* end of frag, we are done */
4736 			notdone = 0;
4737 			foundeom = 1;
4738 		} else {
4739 			/*
4740 			 * It's a begin or middle piece, we must mark all of
4741 			 * it
4742 			 */
4743 			notdone = 1;
4744 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4745 		}
4746 	} while (tp1 && notdone);
4747 	if (foundeom == 0) {
4748 		/*
4749 		 * The multi-part message was scattered across the send and
4750 		 * sent queue.
4751 		 */
4752 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4753 			if ((tp1->rec.data.sid != sid) ||
4754 			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
4755 				break;
4756 			}
4757 			/*
4758 			 * Save tp1 in chk in case we have some on the stream
4759 			 * out queue. If so, and we have an un-transmitted one,
4760 			 * we don't have to fudge the TSN.
4761 			 */
4762 			chk = tp1;
4763 			ret_sz += tp1->book_size;
4764 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4765 			if (sent) {
4766 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4767 			} else {
4768 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4769 			}
4770 			if (tp1->data) {
4771 				sctp_m_freem(tp1->data);
4772 				tp1->data = NULL;
4773 			}
4774 			/* No flight involved here, book the size to 0 */
4775 			tp1->book_size = 0;
4776 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4777 				foundeom = 1;
4778 			}
4779 			do_wakeup_routine = 1;
4780 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4781 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4782 			/*
4783 			 * Move it on to the sent queue so we can wait for it to be
4784 			 * passed by.
4785 			 */
4786 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4787 			    sctp_next);
4788 			stcb->asoc.send_queue_cnt--;
4789 			stcb->asoc.sent_queue_cnt++;
4790 		}
4791 	}
4792 	if (foundeom == 0) {
4793 		/*
4794 		 * Still no eom found. That means there is stuff left on the
4795 		 * stream out queue.. yuck.
4796 		 */
4797 		SCTP_TCB_SEND_LOCK(stcb);
4798 		strq = &stcb->asoc.strmout[sid];
4799 		sp = TAILQ_FIRST(&strq->outqueue);
4800 		if (sp != NULL) {
4801 			sp->discard_rest = 1;
4802 			/*
4803 			 * We may need to put a chunk on the queue that
4804 			 * holds the TSN that would have been sent with the
4805 			 * LAST bit.
4806 			 */
4807 			if (chk == NULL) {
4808 				/* Yep, we have to */
4809 				sctp_alloc_a_chunk(stcb, chk);
4810 				if (chk == NULL) {
4811 					/*
4812 					 * we are hosed. All we can do is
4813 					 * nothing.. which will cause an
4814 					 * abort if the peer is paying
4815 					 * attention.
4816 					 */
4817 					goto oh_well;
4818 				}
4819 				memset(chk, 0, sizeof(*chk));
4820 				chk->rec.data.rcv_flags = 0;
4821 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4822 				chk->asoc = &stcb->asoc;
4823 				if (stcb->asoc.idata_supported == 0) {
4824 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4825 						chk->rec.data.mid = 0;
4826 					} else {
4827 						chk->rec.data.mid = strq->next_mid_ordered;
4828 					}
4829 				} else {
4830 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4831 						chk->rec.data.mid = strq->next_mid_unordered;
4832 					} else {
4833 						chk->rec.data.mid = strq->next_mid_ordered;
4834 					}
4835 				}
4836 				chk->rec.data.sid = sp->sid;
4837 				chk->rec.data.ppid = sp->ppid;
4838 				chk->rec.data.context = sp->context;
4839 				chk->flags = sp->act_flags;
4840 				chk->whoTo = NULL;
4841 				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4842 				strq->chunks_on_queues++;
4843 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4844 				stcb->asoc.sent_queue_cnt++;
4845 				stcb->asoc.pr_sctp_cnt++;
4846 			}
4847 			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4848 			if (sp->sinfo_flags & SCTP_UNORDERED) {
4849 				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
4850 			}
4851 			if (stcb->asoc.idata_supported == 0) {
4852 				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
4853 					strq->next_mid_ordered++;
4854 				}
4855 			} else {
4856 				if (sp->sinfo_flags & SCTP_UNORDERED) {
4857 					strq->next_mid_unordered++;
4858 				} else {
4859 					strq->next_mid_ordered++;
4860 				}
4861 			}
4862 	oh_well:
4863 			if (sp->data) {
4864 				/*
4865 				 * Pull any data to free up the SB and allow
4866 				 * the sender to "add more" while we throw
4867 				 * this away :-)
4868 				 */
4869 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4870 				ret_sz += sp->length;
4871 				do_wakeup_routine = 1;
4872 				sp->some_taken = 1;
4873 				sctp_m_freem(sp->data);
4874 				sp->data = NULL;
4875 				sp->tail_mbuf = NULL;
4876 				sp->length = 0;
4877 			}
4878 		}
4879 		SCTP_TCB_SEND_UNLOCK(stcb);
4880 	}
4881 	if (do_wakeup_routine) {
4882 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4883 		struct socket *so;
4884 
4885 		so = SCTP_INP_SO(stcb->sctp_ep);
4886 		if (!so_locked) {
4887 			atomic_add_int(&stcb->asoc.refcnt, 1);
4888 			SCTP_TCB_UNLOCK(stcb);
4889 			SCTP_SOCKET_LOCK(so, 1);
4890 			SCTP_TCB_LOCK(stcb);
4891 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4892 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4893 				/* assoc was freed while we were unlocked */
4894 				SCTP_SOCKET_UNLOCK(so, 1);
4895 				return (ret_sz);
4896 			}
4897 		}
4898 #endif
4899 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4900 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4901 		if (!so_locked) {
4902 			SCTP_SOCKET_UNLOCK(so, 1);
4903 		}
4904 #endif
4905 	}
4906 	return (ret_sz);
4907 }
4908 
4909 /*
4910  * Checks to see if the given address, sa, is one that is currently known by
4911  * the kernel.  Note: can't distinguish the same address on multiple interfaces
4912  * and doesn't handle multiple addresses with different zone/scope id's.  Note:
4913  * ifa_ifwithaddr() compares the entire sockaddr struct.
4914  */
4915 struct sctp_ifa *
4916 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4917     int holds_lock)
4918 {
4919 	struct sctp_laddr *laddr;
4920 
4921 	if (holds_lock == 0) {
4922 		SCTP_INP_RLOCK(inp);
4923 	}
4924 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4925 		if (laddr->ifa == NULL)
4926 			continue;
4927 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4928 			continue;
4929 #ifdef INET
4930 		if (addr->sa_family == AF_INET) {
4931 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4932 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4933 				/* found him. */
4934 				if (holds_lock == 0) {
4935 					SCTP_INP_RUNLOCK(inp);
4936 				}
4937 				return (laddr->ifa);
4938 				break;
4939 			}
4940 		}
4941 #endif
4942 #ifdef INET6
4943 		if (addr->sa_family == AF_INET6) {
4944 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4945 			    &laddr->ifa->address.sin6)) {
4946 				/* found him. */
4947 				if (holds_lock == 0) {
4948 					SCTP_INP_RUNLOCK(inp);
4949 				}
4950 				return (laddr->ifa);
4951 				break;
4952 			}
4953 		}
4954 #endif
4955 	}
4956 	if (holds_lock == 0) {
4957 		SCTP_INP_RUNLOCK(inp);
4958 	}
4959 	return (NULL);
4960 }
4961 
4962 uint32_t
4963 sctp_get_ifa_hash_val(struct sockaddr *addr)
4964 {
4965 	switch (addr->sa_family) {
4966 #ifdef INET
4967 	case AF_INET:
4968 		{
4969 			struct sockaddr_in *sin;
4970 
4971 			sin = (struct sockaddr_in *)addr;
4972 			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4973 		}
4974 #endif
4975 #ifdef INET6
4976 	case AF_INET6:
4977 		{
4978 			struct sockaddr_in6 *sin6;
4979 			uint32_t hash_of_addr;
4980 
4981 			sin6 = (struct sockaddr_in6 *)addr;
4982 			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4983 			    sin6->sin6_addr.s6_addr32[1] +
4984 			    sin6->sin6_addr.s6_addr32[2] +
4985 			    sin6->sin6_addr.s6_addr32[3]);
4986 			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4987 			return (hash_of_addr);
4988 		}
4989 #endif
4990 	default:
4991 		break;
4992 	}
4993 	return (0);
4994 }
4995 
4996 struct sctp_ifa *
4997 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4998 {
4999 	struct sctp_ifa *sctp_ifap;
5000 	struct sctp_vrf *vrf;
5001 	struct sctp_ifalist *hash_head;
5002 	uint32_t hash_of_addr;
5003 
5004 	if (holds_lock == 0)
5005 		SCTP_IPI_ADDR_RLOCK();
5006 
5007 	vrf = sctp_find_vrf(vrf_id);
5008 	if (vrf == NULL) {
5009 		if (holds_lock == 0)
5010 			SCTP_IPI_ADDR_RUNLOCK();
5011 		return (NULL);
5012 	}
5013 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5014 
5015 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5016 	if (hash_head == NULL) {
5017 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5018 		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5019 		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5020 		sctp_print_address(addr);
5021 		SCTP_PRINTF("No such bucket for address\n");
5022 		if (holds_lock == 0)
5023 			SCTP_IPI_ADDR_RUNLOCK();
5024 
5025 		return (NULL);
5026 	}
5027 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5028 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5029 			continue;
5030 #ifdef INET
5031 		if (addr->sa_family == AF_INET) {
5032 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5033 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5034 				/* found him. */
5035 				if (holds_lock == 0)
5036 					SCTP_IPI_ADDR_RUNLOCK();
5037 				return (sctp_ifap);
5038 				break;
5039 			}
5040 		}
5041 #endif
5042 #ifdef INET6
5043 		if (addr->sa_family == AF_INET6) {
5044 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5045 			    &sctp_ifap->address.sin6)) {
5046 				/* found him. */
5047 				if (holds_lock == 0)
5048 					SCTP_IPI_ADDR_RUNLOCK();
5049 				return (sctp_ifap);
5050 				break;
5051 			}
5052 		}
5053 #endif
5054 	}
5055 	if (holds_lock == 0)
5056 		SCTP_IPI_ADDR_RUNLOCK();
5057 	return (NULL);
5058 }
5059 
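/*
 * Called as the user pulls data from the socket: if the receive window has
 * opened by at least rwnd_req bytes since the last report, send a window
 * update SACK right away; otherwise just remember how much has been freed.
 */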
5060 static void
5061 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
5062     uint32_t rwnd_req)
5063 {
5064 	/* User pulled some data, do we need a rwnd update? */
5065 	int r_unlocked = 0;
5066 	uint32_t dif, rwnd;
5067 	struct socket *so = NULL;
5068 
5069 	if (stcb == NULL)
5070 		return;
5071 
5072 	atomic_add_int(&stcb->asoc.refcnt, 1);
5073 
5074 	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
5075 	    SCTP_STATE_SHUTDOWN_RECEIVED |
5076 	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
5077 		/* Pre-check: if we are freeing, no update */
5078 		goto no_lock;
5079 	}
5080 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5081 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5082 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5083 		goto out;
5084 	}
5085 	so = stcb->sctp_socket;
5086 	if (so == NULL) {
5087 		goto out;
5088 	}
5089 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5090 	/* Have you freed enough to look? */
5091 	*freed_so_far = 0;
5092 	/* Yep, it's worth a look and the lock overhead */
5093 
5094 	/* Figure out what the rwnd would be */
5095 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5096 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5097 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5098 	} else {
5099 		dif = 0;
5100 	}
5101 	if (dif >= rwnd_req) {
5102 		if (hold_rlock) {
5103 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5104 			r_unlocked = 1;
5105 		}
5106 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5107 			/*
5108 			 * One last check before we allow the guy possibly
5109 			 * to get in. There is a race, where the guy has not
5110 			 * reached the gate; in that case, just bail out.
5111 			 */
5112 			goto out;
5113 		}
5114 		SCTP_TCB_LOCK(stcb);
5115 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5116 			/* No reports here */
5117 			SCTP_TCB_UNLOCK(stcb);
5118 			goto out;
5119 		}
5120 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5121 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5122 
5123 		sctp_chunk_output(stcb->sctp_ep, stcb,
5124 		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5125 		/* make sure no timer is running */
5126 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
5127 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5128 		SCTP_TCB_UNLOCK(stcb);
5129 	} else {
5130 		/* Update how much we have pending */
5131 		stcb->freed_by_sorcv_sincelast = dif;
5132 	}
5133 out:
5134 	if (so && r_unlocked && hold_rlock) {
5135 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5136 	}
5137 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5138 no_lock:
5139 	atomic_add_int(&stcb->asoc.refcnt, -1);
5140 	return;
5141 }
5142 
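/*
 * The SCTP receive work-horse used by the socket receive path.  The comment
 * at the top of the function describes the MSG_* flags honoured on input and
 * reported on output.
 *
 * Illustrative sketch (an assumption about typical userland use, not part of
 * this file): an application calling the sctp_recvmsg() wrapper would see
 * the output flags roughly like this (handle_notification() and
 * handle_complete_message() are placeholders):
 *
 *	struct sctp_sndrcvinfo sinfo;
 *	char buf[2048];
 *	int flags = 0;
 *	ssize_t n;
 *
 *	n = sctp_recvmsg(fd, buf, sizeof(buf), NULL, NULL, &sinfo, &flags);
 *	if (n > 0 && (flags & MSG_NOTIFICATION))
 *		handle_notification((union sctp_notification *)buf);
 *	else if (n > 0 && (flags & MSG_EOR))
 *		handle_complete_message(buf, n, &sinfo);
 */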
5143 int
5144 sctp_sorecvmsg(struct socket *so,
5145     struct uio *uio,
5146     struct mbuf **mp,
5147     struct sockaddr *from,
5148     int fromlen,
5149     int *msg_flags,
5150     struct sctp_sndrcvinfo *sinfo,
5151     int filling_sinfo)
5152 {
5153 	/*
5154 	 * MSG flags we will look at:
5155 	 * MSG_DONTWAIT - non-blocking IO.
5156 	 * MSG_PEEK - Look, don't touch :-D (only valid with an OUT mbuf copy,
5157 	 *     i.e. mp == NULL, thus uio is the copy method to userland).
5158 	 * MSG_WAITALL - ??  On the way out we may send out any combination of:
5159 	 *
5160 	 */
5161 	struct sctp_inpcb *inp = NULL;
5162 	int my_len = 0;
5163 	int cp_len = 0, error = 0;
5164 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5165 	struct mbuf *m = NULL;
5166 	struct sctp_tcb *stcb = NULL;
5167 	int wakeup_read_socket = 0;
5168 	int freecnt_applied = 0;
5169 	int out_flags = 0, in_flags = 0;
5170 	int block_allowed = 1;
5171 	uint32_t freed_so_far = 0;
5172 	uint32_t copied_so_far = 0;
5173 	int in_eeor_mode = 0;
5174 	int no_rcv_needed = 0;
5175 	uint32_t rwnd_req = 0;
5176 	int hold_sblock = 0;
5177 	int hold_rlock = 0;
5178 	ssize_t slen = 0;
5179 	uint32_t held_length = 0;
5180 	int sockbuf_lock = 0;
5181 
5182 	if (uio == NULL) {
5183 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5184 		return (EINVAL);
5185 	}
5186 	if (msg_flags) {
5187 		in_flags = *msg_flags;
5188 		if (in_flags & MSG_PEEK)
5189 			SCTP_STAT_INCR(sctps_read_peeks);
5190 	} else {
5191 		in_flags = 0;
5192 	}
5193 	slen = uio->uio_resid;
5194 
5195 	/* Pull in and set up our int flags */
5196 	if (in_flags & MSG_OOB) {
5197 		/* Out-of-band data is NOT supported */
5198 		return (EOPNOTSUPP);
5199 	}
5200 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5201 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5202 		return (EINVAL);
5203 	}
5204 	if ((in_flags & (MSG_DONTWAIT
5205 	    | MSG_NBIO
5206 	    )) ||
5207 	    SCTP_SO_IS_NBIO(so)) {
5208 		block_allowed = 0;
5209 	}
5210 	/* setup the endpoint */
5211 	inp = (struct sctp_inpcb *)so->so_pcb;
5212 	if (inp == NULL) {
5213 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5214 		return (EFAULT);
5215 	}
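	/*
	 * rwnd_req is how much receive-buffer space must be freed before we
	 * consider telling the peer (via sctp_user_rcvd()) that the window
	 * has opened up again.
	 */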
5216 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5217 	/* Must be at least an MTU's worth */
5218 	if (rwnd_req < SCTP_MIN_RWND)
5219 		rwnd_req = SCTP_MIN_RWND;
5220 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5221 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5222 		sctp_misc_ints(SCTP_SORECV_ENTER,
5223 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5224 	}
5225 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5226 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5227 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5228 	}
5229 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5230 	if (error) {
5231 		goto release_unlocked;
5232 	}
5233 	sockbuf_lock = 1;
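	/*
	 * We now hold the socket-buffer sleep lock, so only one reader runs
	 * through the receive path at a time; sockbuf_lock tracks that we
	 * still owe an sbunlock() on the way out.
	 */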
5234 restart:
5235 
5236 
5237 restart_nosblocks:
5238 	if (hold_sblock == 0) {
5239 		SOCKBUF_LOCK(&so->so_rcv);
5240 		hold_sblock = 1;
5241 	}
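	/*
	 * hold_sblock and hold_rlock track whether we currently hold the
	 * socket-buffer mutex and the endpoint's read-queue lock, so the
	 * exit paths know exactly what still needs to be dropped.
	 */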
5242 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5243 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5244 		goto out;
5245 	}
5246 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5247 		if (so->so_error) {
5248 			error = so->so_error;
5249 			if ((in_flags & MSG_PEEK) == 0)
5250 				so->so_error = 0;
5251 			goto out;
5252 		} else {
5253 			if (so->so_rcv.sb_cc == 0) {
5254 				/* indicate EOF */
5255 				error = 0;
5256 				goto out;
5257 			}
5258 		}
5259 	}
5260 	if (so->so_rcv.sb_cc <= held_length) {
5261 		if (so->so_error) {
5262 			error = so->so_error;
5263 			if ((in_flags & MSG_PEEK) == 0) {
5264 				so->so_error = 0;
5265 			}
5266 			goto out;
5267 		}
5268 		if ((so->so_rcv.sb_cc == 0) &&
5269 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5270 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5271 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5272 				/*
5273 				 * For the active open side, clear the flags
5274 				 * for re-use; the passive open side is
5275 				 * blocked by connect.
5276 				 */
5277 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5278 					/*
5279 					 * You were aborted, passive side
5280 					 * always hits here
5281 					 */
5282 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5283 					error = ECONNRESET;
5284 				}
5285 				so->so_state &= ~(SS_ISCONNECTING |
5286 				    SS_ISDISCONNECTING |
5287 				    SS_ISCONFIRMING |
5288 				    SS_ISCONNECTED);
5289 				if (error == 0) {
5290 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5291 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5292 						error = ENOTCONN;
5293 					}
5294 				}
5295 				goto out;
5296 			}
5297 		}
5298 		if (block_allowed) {
5299 			error = sbwait(&so->so_rcv);
5300 			if (error) {
5301 				goto out;
5302 			}
5303 			held_length = 0;
5304 			goto restart_nosblocks;
5305 		} else {
5306 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5307 			error = EWOULDBLOCK;
5308 			goto out;
5309 		}
5310 	}
5311 	if (hold_sblock == 1) {
5312 		SOCKBUF_UNLOCK(&so->so_rcv);
5313 		hold_sblock = 0;
5314 	}
5315 	/* we possibly have data we can read */
5316 	/* sa_ignore FREED_MEMORY */
5317 	control = TAILQ_FIRST(&inp->read_queue);
5318 	if (control == NULL) {
5319 		/*
5320 		 * This could happen when the appender has done the
5321 		 * increment but has not yet done the tailq insert onto
5322 		 * the read_queue.
5323 		 */
5324 		if (hold_rlock == 0) {
5325 			SCTP_INP_READ_LOCK(inp);
5326 		}
5327 		control = TAILQ_FIRST(&inp->read_queue);
5328 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5329 #ifdef INVARIANTS
5330 			panic("Huh, its non zero and nothing on control?");
5331 #endif
5332 			so->so_rcv.sb_cc = 0;
5333 		}
5334 		SCTP_INP_READ_UNLOCK(inp);
5335 		hold_rlock = 0;
5336 		goto restart;
5337 	}
5338 	if ((control->length == 0) &&
5339 	    (control->do_not_ref_stcb)) {
5340 		/*
5341 		 * Clean-up code for freeing an assoc that left behind a
5342 		 * pdapi.. maybe a peer in EEOR mode that just closed after
5343 		 * sending and never indicated an EOR.
5344 		 */
5345 		if (hold_rlock == 0) {
5346 			hold_rlock = 1;
5347 			SCTP_INP_READ_LOCK(inp);
5348 		}
5349 		control->held_length = 0;
5350 		if (control->data) {
5351 			/* Hmm there is data here .. fix */
5352 			struct mbuf *m_tmp;
5353 			int cnt = 0;
5354 
5355 			m_tmp = control->data;
5356 			while (m_tmp) {
5357 				cnt += SCTP_BUF_LEN(m_tmp);
5358 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5359 					control->tail_mbuf = m_tmp;
5360 					control->end_added = 1;
5361 				}
5362 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5363 			}
5364 			control->length = cnt;
5365 		} else {
5366 			/* remove it */
5367 			TAILQ_REMOVE(&inp->read_queue, control, next);
5368 			/* Add back any hidden data */
5369 			sctp_free_remote_addr(control->whoFrom);
5370 			sctp_free_a_readq(stcb, control);
5371 		}
5372 		if (hold_rlock) {
5373 			hold_rlock = 0;
5374 			SCTP_INP_READ_UNLOCK(inp);
5375 		}
5376 		goto restart;
5377 	}
5378 	if ((control->length == 0) &&
5379 	    (control->end_added == 1)) {
5380 		/*
5381 		 * Do we also need to check for (control->pdapi_aborted ==
5382 		 * 1)?
5383 		 */
5384 		if (hold_rlock == 0) {
5385 			hold_rlock = 1;
5386 			SCTP_INP_READ_LOCK(inp);
5387 		}
5388 		TAILQ_REMOVE(&inp->read_queue, control, next);
5389 		if (control->data) {
5390 #ifdef INVARIANTS
5391 			panic("control->data not null but control->length == 0");
5392 #else
5393 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5394 			sctp_m_freem(control->data);
5395 			control->data = NULL;
5396 #endif
5397 		}
5398 		if (control->aux_data) {
5399 			sctp_m_free(control->aux_data);
5400 			control->aux_data = NULL;
5401 		}
5402 #ifdef INVARIANTS
5403 		if (control->on_strm_q) {
5404 			panic("About to free ctl:%p so:%p and its in %d",
5405 			    control, so, control->on_strm_q);
5406 		}
5407 #endif
5408 		sctp_free_remote_addr(control->whoFrom);
5409 		sctp_free_a_readq(stcb, control);
5410 		if (hold_rlock) {
5411 			hold_rlock = 0;
5412 			SCTP_INP_READ_UNLOCK(inp);
5413 		}
5414 		goto restart;
5415 	}
5416 	if (control->length == 0) {
5417 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5418 		    (filling_sinfo)) {
5419 			/* find a more suitable one than this */
5420 			ctl = TAILQ_NEXT(control, next);
5421 			while (ctl) {
5422 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5423 				    (ctl->some_taken ||
5424 				    (ctl->spec_flags & M_NOTIFICATION) ||
5425 				    ((ctl->do_not_ref_stcb == 0) &&
5426 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5427 				    ) {
5428 					/*-
5429 					 * If we have a different TCB next, and there is data
5430 					 * present: if we have already taken some (pdapi), OR we can
5431 					 * ref the tcb and no delivery has started on this stream, we
5432 					 * take it. Note we allow a notification on a different
5433 					 * assoc to be delivered.
5434 					 */
5435 					control = ctl;
5436 					goto found_one;
5437 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5438 					    (ctl->length) &&
5439 					    ((ctl->some_taken) ||
5440 					    ((ctl->do_not_ref_stcb == 0) &&
5441 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5442 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5443 					/*-
5444 					 * If we have the same tcb, and there is data present, and we
5445 					 * have the stream interleave feature present: then if we have
5446 					 * taken some (pdapi) or we can ref that tcb AND we have
5447 					 * not started a delivery for this stream, we can take it.
5448 					 * Note we do NOT allow a notification on the same assoc to
5449 					 * be delivered.
5450 					 */
5451 					control = ctl;
5452 					goto found_one;
5453 				}
5454 				ctl = TAILQ_NEXT(ctl, next);
5455 			}
5456 		}
5457 		/*
5458 		 * If we reach here, no suitable replacement is available
5459 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5460 		 * into our held count, and it's time to sleep again.
5461 		 */
5462 		held_length = so->so_rcv.sb_cc;
5463 		control->held_length = so->so_rcv.sb_cc;
5464 		goto restart;
5465 	}
5466 	/* Clear the held length since there is something to read */
5467 	control->held_length = 0;
5468 found_one:
5469 	/*
5470 	 * If we reach here, control has some data for us to read off.
5471 	 * Note that stcb COULD be NULL.
5472 	 */
5473 	if (hold_rlock == 0) {
5474 		hold_rlock = 1;
5475 		SCTP_INP_READ_LOCK(inp);
5476 	}
5477 	control->some_taken++;
5478 	stcb = control->stcb;
5479 	if (stcb) {
5480 		if ((control->do_not_ref_stcb == 0) &&
5481 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5482 			if (freecnt_applied == 0)
5483 				stcb = NULL;
5484 		} else if (control->do_not_ref_stcb == 0) {
5485 			/* you can't free it on me please */
5486 			/*
5487 			 * The lock on the socket buffer protects us so the
5488 			 * free code will stop. But since we used the
5489 			 * socketbuf lock and the sender uses the tcb_lock
5490 			 * to increment, we need to use the atomic add to
5491 			 * the refcnt
5492 			 */
5493 			if (freecnt_applied) {
5494 #ifdef INVARIANTS
5495 				panic("refcnt already incremented");
5496 #else
5497 				SCTP_PRINTF("refcnt already incremented?\n");
5498 #endif
5499 			} else {
5500 				atomic_add_int(&stcb->asoc.refcnt, 1);
5501 				freecnt_applied = 1;
5502 			}
5503 			/*
5504 			 * Set up to remember how much we have not yet told
5505 			 * the peer our rwnd has opened up. Note we grab the
5506 			 * value from the tcb from last time. Note too that
5507 			 * sack sending clears this when a sack is sent,
5508 			 * which is fine. Once we hit the rwnd_req, we will
5509 			 * then go to sctp_user_rcvd(), which will not
5510 			 * lock until it KNOWS it MUST send a WUP-SACK.
5511 			 */
5512 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5513 			stcb->freed_by_sorcv_sincelast = 0;
5514 		}
5515 	}
5516 	if (stcb &&
5517 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5518 	    control->do_not_ref_stcb == 0) {
5519 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5520 	}
5521 	/* First lets get off the sinfo and sockaddr info */
5522 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5523 		sinfo->sinfo_stream = control->sinfo_stream;
5524 		sinfo->sinfo_ssn = (uint16_t)control->mid;
5525 		sinfo->sinfo_flags = control->sinfo_flags;
5526 		sinfo->sinfo_ppid = control->sinfo_ppid;
5527 		sinfo->sinfo_context = control->sinfo_context;
5528 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5529 		sinfo->sinfo_tsn = control->sinfo_tsn;
5530 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5531 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5532 		nxt = TAILQ_NEXT(control, next);
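		/*
		 * If extended rcvinfo or nxtinfo was requested, also describe
		 * the next queued message (if any) in the sctp_extrcvinfo
		 * extension fields.
		 */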
5533 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5534 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5535 			struct sctp_extrcvinfo *s_extra;
5536 
5537 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5538 			if ((nxt) &&
5539 			    (nxt->length)) {
5540 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5541 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5542 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5543 				}
5544 				if (nxt->spec_flags & M_NOTIFICATION) {
5545 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5546 				}
5547 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5548 				s_extra->serinfo_next_length = nxt->length;
5549 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5550 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5551 				if (nxt->tail_mbuf != NULL) {
5552 					if (nxt->end_added) {
5553 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5554 					}
5555 				}
5556 			} else {
5557 				/*
5558 				 * We explicitly zero these, since the memcpy
5559 				 * may have picked up other things beyond the
5560 				 * older sinfo_ fields that are on the
5561 				 * control structure :-D
5562 				 */
5563 				nxt = NULL;
5564 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5565 				s_extra->serinfo_next_aid = 0;
5566 				s_extra->serinfo_next_length = 0;
5567 				s_extra->serinfo_next_ppid = 0;
5568 				s_extra->serinfo_next_stream = 0;
5569 			}
5570 		}
5571 		/*
5572 		 * update off the real current cum-ack, if we have an stcb.
5573 		 */
5574 		if ((control->do_not_ref_stcb == 0) && stcb)
5575 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5576 		/*
5577 		 * mask off the high bits, we keep the actual chunk bits in
5578 		 * there.
5579 		 */
5580 		sinfo->sinfo_flags &= 0x00ff;
5581 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5582 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5583 		}
5584 	}
5585 #ifdef SCTP_ASOCLOG_OF_TSNS
5586 	{
5587 		int index, newindex;
5588 		struct sctp_pcbtsn_rlog *entry;
5589 
5590 		do {
5591 			index = inp->readlog_index;
5592 			newindex = index + 1;
5593 			if (newindex >= SCTP_READ_LOG_SIZE) {
5594 				newindex = 0;
5595 			}
5596 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5597 		entry = &inp->readlog[index];
5598 		entry->vtag = control->sinfo_assoc_id;
5599 		entry->strm = control->sinfo_stream;
5600 		entry->seq = (uint16_t)control->mid;
5601 		entry->sz = control->length;
5602 		entry->flgs = control->sinfo_flags;
5603 	}
5604 #endif
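	/*
	 * Copy out the source address of this message, converting an IPv4
	 * address to a v4-mapped IPv6 address when the socket asked for
	 * mapped addresses.
	 */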
5605 	if ((fromlen > 0) && (from != NULL)) {
5606 		union sctp_sockstore store;
5607 		size_t len;
5608 
5609 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5610 #ifdef INET6
5611 		case AF_INET6:
5612 			len = sizeof(struct sockaddr_in6);
5613 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5614 			store.sin6.sin6_port = control->port_from;
5615 			break;
5616 #endif
5617 #ifdef INET
5618 		case AF_INET:
5619 #ifdef INET6
5620 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5621 				len = sizeof(struct sockaddr_in6);
5622 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5623 				    &store.sin6);
5624 				store.sin6.sin6_port = control->port_from;
5625 			} else {
5626 				len = sizeof(struct sockaddr_in);
5627 				store.sin = control->whoFrom->ro._l_addr.sin;
5628 				store.sin.sin_port = control->port_from;
5629 			}
5630 #else
5631 			len = sizeof(struct sockaddr_in);
5632 			store.sin = control->whoFrom->ro._l_addr.sin;
5633 			store.sin.sin_port = control->port_from;
5634 #endif
5635 			break;
5636 #endif
5637 		default:
5638 			len = 0;
5639 			break;
5640 		}
5641 		memcpy(from, &store, min((size_t)fromlen, len));
5642 #ifdef INET6
5643 		{
5644 			struct sockaddr_in6 lsa6, *from6;
5645 
5646 			from6 = (struct sockaddr_in6 *)from;
5647 			sctp_recover_scope_mac(from6, (&lsa6));
5648 		}
5649 #endif
5650 	}
5651 	if (hold_rlock) {
5652 		SCTP_INP_READ_UNLOCK(inp);
5653 		hold_rlock = 0;
5654 	}
5655 	if (hold_sblock) {
5656 		SOCKBUF_UNLOCK(&so->so_rcv);
5657 		hold_sblock = 0;
5658 	}
5659 	/* now copy out what data we can */
5660 	if (mp == NULL) {
5661 		/* copy out each mbuf in the chain up to length */
5662 get_more_data:
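		/*
		 * Walk the control's mbuf chain, copying each mbuf to the
		 * user via uiomove().  With MSG_PEEK we only advance;
		 * without it each consumed mbuf is freed and accounted
		 * against the socket buffer and the association.
		 */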
5663 		m = control->data;
5664 		while (m) {
5665 			/* Move out all we can */
5666 			cp_len = (int)uio->uio_resid;
5667 			my_len = (int)SCTP_BUF_LEN(m);
5668 			if (cp_len > my_len) {
5669 				/* not enough in this buf */
5670 				cp_len = my_len;
5671 			}
5672 			if (hold_rlock) {
5673 				SCTP_INP_READ_UNLOCK(inp);
5674 				hold_rlock = 0;
5675 			}
5676 			if (cp_len > 0)
5677 				error = uiomove(mtod(m, char *), cp_len, uio);
5678 			/* re-read */
5679 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5680 				goto release;
5681 			}
5682 			if ((control->do_not_ref_stcb == 0) && stcb &&
5683 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5684 				no_rcv_needed = 1;
5685 			}
5686 			if (error) {
5687 				/* error we are out of here */
5688 				goto release;
5689 			}
5690 			SCTP_INP_READ_LOCK(inp);
5691 			hold_rlock = 1;
5692 			if (cp_len == SCTP_BUF_LEN(m)) {
5693 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5694 				    (control->end_added)) {
5695 					out_flags |= MSG_EOR;
5696 					if ((control->do_not_ref_stcb == 0) &&
5697 					    (control->stcb != NULL) &&
5698 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5699 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5700 				}
5701 				if (control->spec_flags & M_NOTIFICATION) {
5702 					out_flags |= MSG_NOTIFICATION;
5703 				}
5704 				/* we ate up the mbuf */
5705 				if (in_flags & MSG_PEEK) {
5706 					/* just looking */
5707 					m = SCTP_BUF_NEXT(m);
5708 					copied_so_far += cp_len;
5709 				} else {
5710 					/* dispose of the mbuf */
5711 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5712 						sctp_sblog(&so->so_rcv,
5713 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5714 					}
5715 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5716 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5717 						sctp_sblog(&so->so_rcv,
5718 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5719 					}
5720 					copied_so_far += cp_len;
5721 					freed_so_far += cp_len;
5722 					freed_so_far += MSIZE;
5723 					atomic_subtract_int(&control->length, cp_len);
5724 					control->data = sctp_m_free(m);
5725 					m = control->data;
5726 					/*
5727 					 * been through it all; must hold the
5728 					 * sb lock, ok to null the tail
5729 					 */
5730 					if (control->data == NULL) {
5731 #ifdef INVARIANTS
5732 						if ((control->end_added == 0) ||
5733 						    (TAILQ_NEXT(control, next) == NULL)) {
5734 							/*
5735 							 * If the end is not
5736 							 * added, OR the
5737 							 * next is NOT null
5738 							 * we MUST have the
5739 							 * lock.
5740 							 */
5741 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5742 								panic("Hmm we don't own the lock?");
5743 							}
5744 						}
5745 #endif
5746 						control->tail_mbuf = NULL;
5747 #ifdef INVARIANTS
5748 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5749 							panic("end_added, nothing left and no MSG_EOR");
5750 						}
5751 #endif
5752 					}
5753 				}
5754 			} else {
5755 				/* Do we need to trim the mbuf? */
5756 				if (control->spec_flags & M_NOTIFICATION) {
5757 					out_flags |= MSG_NOTIFICATION;
5758 				}
5759 				if ((in_flags & MSG_PEEK) == 0) {
5760 					SCTP_BUF_RESV_UF(m, cp_len);
5761 					SCTP_BUF_LEN(m) -= cp_len;
5762 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5763 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5764 					}
5765 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5766 					if ((control->do_not_ref_stcb == 0) &&
5767 					    stcb) {
5768 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5769 					}
5770 					copied_so_far += cp_len;
5771 					freed_so_far += cp_len;
5772 					freed_so_far += MSIZE;
5773 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5774 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5775 						    SCTP_LOG_SBRESULT, 0);
5776 					}
5777 					atomic_subtract_int(&control->length, cp_len);
5778 				} else {
5779 					copied_so_far += cp_len;
5780 				}
5781 			}
5782 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5783 				break;
5784 			}
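			/*
			 * If enough receive-buffer space has been freed,
			 * give the peer a window update now rather than
			 * waiting until the whole message is consumed.
			 */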
5785 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5786 			    (control->do_not_ref_stcb == 0) &&
5787 			    (freed_so_far >= rwnd_req)) {
5788 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5789 			}
5790 		}		/* end while(m) */
5791 		/*
5792 		 * At this point we have looked at it all and we either have
5793 		 * a MSG_EOR, or have read all the user wants... <OR>
5794 		 * control->length == 0.
5795 		 */
5796 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5797 			/* we are done with this control */
5798 			if (control->length == 0) {
5799 				if (control->data) {
5800 #ifdef INVARIANTS
5801 					panic("control->data not null at read eor?");
5802 #else
5803 					SCTP_PRINTF("Strange, data left in the control buffer .. invariants would panic?\n");
5804 					sctp_m_freem(control->data);
5805 					control->data = NULL;
5806 #endif
5807 				}
5808 		done_with_control:
5809 				if (hold_rlock == 0) {
5810 					SCTP_INP_READ_LOCK(inp);
5811 					hold_rlock = 1;
5812 				}
5813 				TAILQ_REMOVE(&inp->read_queue, control, next);
5814 				/* Add back any hidden data */
5815 				if (control->held_length) {
5816 					held_length = 0;
5817 					control->held_length = 0;
5818 					wakeup_read_socket = 1;
5819 				}
5820 				if (control->aux_data) {
5821 					sctp_m_free(control->aux_data);
5822 					control->aux_data = NULL;
5823 				}
5824 				no_rcv_needed = control->do_not_ref_stcb;
5825 				sctp_free_remote_addr(control->whoFrom);
5826 				control->data = NULL;
5827 #ifdef INVARIANTS
5828 				if (control->on_strm_q) {
5829 					panic("About to free ctl:%p so:%p and its in %d",
5830 					    control, so, control->on_strm_q);
5831 				}
5832 #endif
5833 				sctp_free_a_readq(stcb, control);
5834 				control = NULL;
5835 				if ((freed_so_far >= rwnd_req) &&
5836 				    (no_rcv_needed == 0))
5837 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5838 
5839 			} else {
5840 				/*
5841 				 * The user did not read all of this
5842 				 * message, turn off the returned MSG_EOR
5843 				 * since we are leaving more behind on the
5844 				 * control to read.
5845 				 */
5846 #ifdef INVARIANTS
5847 				if (control->end_added &&
5848 				    (control->data == NULL) &&
5849 				    (control->tail_mbuf == NULL)) {
5850 					panic("Gak, control->length is corrupt?");
5851 				}
5852 #endif
5853 				no_rcv_needed = control->do_not_ref_stcb;
5854 				out_flags &= ~MSG_EOR;
5855 			}
5856 		}
5857 		if (out_flags & MSG_EOR) {
5858 			goto release;
5859 		}
5860 		if ((uio->uio_resid == 0) ||
5861 		    ((in_eeor_mode) &&
5862 		    (copied_so_far >= (uint32_t)max(so->so_rcv.sb_lowat, 1)))) {
5863 			goto release;
5864 		}
5865 		/*
5866 		 * If I hit here the receiver wants more and this message is
5867 		 * NOT done (pd-api). So two questions: can we block? If not,
5868 		 * we are done. Did the user NOT set MSG_WAITALL?
5869 		 */
5870 		if (block_allowed == 0) {
5871 			goto release;
5872 		}
5873 		/*
5874 		 * We need to wait for more data. A few things: - We don't
5875 		 * sbunlock() so we don't get someone else reading. - We
5876 		 * must be sure to account for the case where what is added
5877 		 * is NOT to our control when we wake up.
5878 		 */
5879 
5880 		/*
5881 		 * Do we need to tell the transport a rwnd update might be
5882 		 * needed before we go to sleep?
5883 		 */
5884 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5885 		    ((freed_so_far >= rwnd_req) &&
5886 		    (control->do_not_ref_stcb == 0) &&
5887 		    (no_rcv_needed == 0))) {
5888 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5889 		}
5890 wait_some_more:
5891 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5892 			goto release;
5893 		}
5894 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5895 			goto release;
5896 
5897 		if (hold_rlock == 1) {
5898 			SCTP_INP_READ_UNLOCK(inp);
5899 			hold_rlock = 0;
5900 		}
5901 		if (hold_sblock == 0) {
5902 			SOCKBUF_LOCK(&so->so_rcv);
5903 			hold_sblock = 1;
5904 		}
5905 		if ((copied_so_far) && (control->length == 0) &&
5906 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5907 			goto release;
5908 		}
5909 		if (so->so_rcv.sb_cc <= control->held_length) {
5910 			error = sbwait(&so->so_rcv);
5911 			if (error) {
5912 				goto release;
5913 			}
5914 			control->held_length = 0;
5915 		}
5916 		if (hold_sblock) {
5917 			SOCKBUF_UNLOCK(&so->so_rcv);
5918 			hold_sblock = 0;
5919 		}
5920 		if (control->length == 0) {
5921 			/* still nothing here */
5922 			if (control->end_added == 1) {
5923 				/* he aborted, or is done, i.e. did a shutdown */
5924 				out_flags |= MSG_EOR;
5925 				if (control->pdapi_aborted) {
5926 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5927 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5928 
5929 					out_flags |= MSG_TRUNC;
5930 				} else {
5931 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5932 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5933 				}
5934 				goto done_with_control;
5935 			}
5936 			if (so->so_rcv.sb_cc > held_length) {
5937 				control->held_length = so->so_rcv.sb_cc;
5938 				held_length = 0;
5939 			}
5940 			goto wait_some_more;
5941 		} else if (control->data == NULL) {
5942 			/*
5943 			 * we must re-sync since data is probably being
5944 			 * added
5945 			 */
5946 			SCTP_INP_READ_LOCK(inp);
5947 			if ((control->length > 0) && (control->data == NULL)) {
5948 				/*
5949 				 * big trouble.. we have the lock and it's
5950 				 * corrupt?
5951 				 */
5952 #ifdef INVARIANTS
5953 				panic("Impossible data==NULL length !=0");
5954 #endif
5955 				out_flags |= MSG_EOR;
5956 				out_flags |= MSG_TRUNC;
5957 				control->length = 0;
5958 				SCTP_INP_READ_UNLOCK(inp);
5959 				goto done_with_control;
5960 			}
5961 			SCTP_INP_READ_UNLOCK(inp);
5962 			/* We will fall around to get more data */
5963 		}
5964 		goto get_more_data;
5965 	} else {
5966 		/*-
5967 		 * Give caller back the mbuf chain,
5968 		 * store in uio_resid the length
5969 		 */
5970 		wakeup_read_socket = 0;
5971 		if ((control->end_added == 0) ||
5972 		    (TAILQ_NEXT(control, next) == NULL)) {
5973 			/* Need to get rlock */
5974 			if (hold_rlock == 0) {
5975 				SCTP_INP_READ_LOCK(inp);
5976 				hold_rlock = 1;
5977 			}
5978 		}
5979 		if (control->end_added) {
5980 			out_flags |= MSG_EOR;
5981 			if ((control->do_not_ref_stcb == 0) &&
5982 			    (control->stcb != NULL) &&
5983 			    ((control->spec_flags & M_NOTIFICATION) == 0))
5984 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5985 		}
5986 		if (control->spec_flags & M_NOTIFICATION) {
5987 			out_flags |= MSG_NOTIFICATION;
5988 		}
5989 		uio->uio_resid = control->length;
5990 		*mp = control->data;
5991 		m = control->data;
5992 		while (m) {
5993 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5994 				sctp_sblog(&so->so_rcv,
5995 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5996 			}
5997 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5998 			freed_so_far += SCTP_BUF_LEN(m);
5999 			freed_so_far += MSIZE;
6000 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6001 				sctp_sblog(&so->so_rcv,
6002 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6003 			}
6004 			m = SCTP_BUF_NEXT(m);
6005 		}
6006 		control->data = control->tail_mbuf = NULL;
6007 		control->length = 0;
6008 		if (out_flags & MSG_EOR) {
6009 			/* Done with this control */
6010 			goto done_with_control;
6011 		}
6012 	}
6013 release:
6014 	if (hold_rlock == 1) {
6015 		SCTP_INP_READ_UNLOCK(inp);
6016 		hold_rlock = 0;
6017 	}
6018 	if (hold_sblock == 1) {
6019 		SOCKBUF_UNLOCK(&so->so_rcv);
6020 		hold_sblock = 0;
6021 	}
6022 	sbunlock(&so->so_rcv);
6023 	sockbuf_lock = 0;
6024 
6025 release_unlocked:
6026 	if (hold_sblock) {
6027 		SOCKBUF_UNLOCK(&so->so_rcv);
6028 		hold_sblock = 0;
6029 	}
6030 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6031 		if ((freed_so_far >= rwnd_req) &&
6032 		    (control && (control->do_not_ref_stcb == 0)) &&
6033 		    (no_rcv_needed == 0))
6034 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6035 	}
6036 out:
6037 	if (msg_flags) {
6038 		*msg_flags = out_flags;
6039 	}
6040 	if (((out_flags & MSG_EOR) == 0) &&
6041 	    ((in_flags & MSG_PEEK) == 0) &&
6042 	    (sinfo) &&
6043 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6044 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6045 		struct sctp_extrcvinfo *s_extra;
6046 
6047 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6048 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6049 	}
6050 	if (hold_rlock == 1) {
6051 		SCTP_INP_READ_UNLOCK(inp);
6052 	}
6053 	if (hold_sblock) {
6054 		SOCKBUF_UNLOCK(&so->so_rcv);
6055 	}
6056 	if (sockbuf_lock) {
6057 		sbunlock(&so->so_rcv);
6058 	}
6059 	if (freecnt_applied) {
6060 		/*
6061 		 * The lock on the socket buffer protects us so the free
6062 		 * code will stop. But since we used the socketbuf lock and
6063 		 * the sender uses the tcb_lock to increment, we need to use
6064 		 * the atomic add to the refcnt.
6065 		 */
6066 		if (stcb == NULL) {
6067 #ifdef INVARIANTS
6068 			panic("stcb for refcnt has gone NULL?");
6069 			goto stage_left;
6070 #else
6071 			goto stage_left;
6072 #endif
6073 		}
6074 		/* Save the value back for next time */
6075 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6076 		atomic_add_int(&stcb->asoc.refcnt, -1);
6077 	}
6078 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6079 		if (stcb) {
6080 			sctp_misc_ints(SCTP_SORECV_DONE,
6081 			    freed_so_far,
6082 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6083 			    stcb->asoc.my_rwnd,
6084 			    so->so_rcv.sb_cc);
6085 		} else {
6086 			sctp_misc_ints(SCTP_SORECV_DONE,
6087 			    freed_so_far,
6088 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6089 			    0,
6090 			    so->so_rcv.sb_cc);
6091 		}
6092 	}
6093 stage_left:
6094 	if (wakeup_read_socket) {
6095 		sctp_sorwakeup(inp, so);
6096 	}
6097 	return (error);
6098 }
6099 
6100 
6101 #ifdef SCTP_MBUF_LOGGING
6102 struct mbuf *
6103 sctp_m_free(struct mbuf *m)
6104 {
6105 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6106 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6107 	}
6108 	return (m_free(m));
6109 }
6110 
6111 void
6112 sctp_m_freem(struct mbuf *mb)
6113 {
6114 	while (mb != NULL)
6115 		mb = sctp_m_free(mb);
6116 }
6117 
6118 #endif
6119 
6120 int
6121 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6122 {
6123 	/*
6124 	 * Given a local address, for all associations that hold the
6125 	 * address, request a peer-set-primary.
6126 	 */
6127 	struct sctp_ifa *ifa;
6128 	struct sctp_laddr *wi;
6129 
6130 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6131 	if (ifa == NULL) {
6132 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6133 		return (EADDRNOTAVAIL);
6134 	}
6135 	/*
6136 	 * Now that we have the ifa we must awaken the iterator with this
6137 	 * message.
6138 	 */
6139 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6140 	if (wi == NULL) {
6141 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6142 		return (ENOMEM);
6143 	}
6144 	/* Now incr the count and init the wi structure */
6145 	SCTP_INCR_LADDR_COUNT();
6146 	memset(wi, 0, sizeof(*wi));
6147 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6148 	wi->ifa = ifa;
6149 	wi->action = SCTP_SET_PRIM_ADDR;
6150 	atomic_add_int(&ifa->refcount, 1);
6151 
6152 	/* Now add it to the work queue */
6153 	SCTP_WQ_ADDR_LOCK();
6154 	/*
6155 	 * Should this really be a tailq? As it is we will process the
6156 	 * newest first :-0
6157 	 */
6158 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6159 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6160 	    (struct sctp_inpcb *)NULL,
6161 	    (struct sctp_tcb *)NULL,
6162 	    (struct sctp_nets *)NULL);
6163 	SCTP_WQ_ADDR_UNLOCK();
6164 	return (0);
6165 }
6166 
6167 
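/*
 * Socket-level receive wrapper: calls sctp_sorecvmsg() and, on return,
 * optionally packages the sndrcvinfo as control data and duplicates the
 * peer address for the caller.
 */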
6168 int
6169 sctp_soreceive(struct socket *so,
6170     struct sockaddr **psa,
6171     struct uio *uio,
6172     struct mbuf **mp0,
6173     struct mbuf **controlp,
6174     int *flagsp)
6175 {
6176 	int error, fromlen;
6177 	uint8_t sockbuf[256];
6178 	struct sockaddr *from;
6179 	struct sctp_extrcvinfo sinfo;
6180 	int filling_sinfo = 1;
6181 	int flags;
6182 	struct sctp_inpcb *inp;
6183 
6184 	inp = (struct sctp_inpcb *)so->so_pcb;
6185 	/* pick up the endpoint we are reading from */
6186 	if (inp == NULL) {
6187 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6188 		return (EINVAL);
6189 	}
6190 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6191 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6192 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6193 	    (controlp == NULL)) {
6194 		/* user does not want the sndrcv ctl */
6195 		filling_sinfo = 0;
6196 	}
6197 	if (psa) {
6198 		from = (struct sockaddr *)sockbuf;
6199 		fromlen = sizeof(sockbuf);
6200 		from->sa_len = 0;
6201 	} else {
6202 		from = NULL;
6203 		fromlen = 0;
6204 	}
6205 
6206 	if (filling_sinfo) {
6207 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6208 	}
6209 	if (flagsp != NULL) {
6210 		flags = *flagsp;
6211 	} else {
6212 		flags = 0;
6213 	}
6214 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, &flags,
6215 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6216 	if (flagsp != NULL) {
6217 		*flagsp = flags;
6218 	}
6219 	if (controlp != NULL) {
6220 		/* copy back the sinfo in a CMSG format */
6221 		if (filling_sinfo && ((flags & MSG_NOTIFICATION) == 0)) {
6222 			*controlp = sctp_build_ctl_nchunk(inp,
6223 			    (struct sctp_sndrcvinfo *)&sinfo);
6224 		} else {
6225 			*controlp = NULL;
6226 		}
6227 	}
6228 	if (psa) {
6229 		/* copy back the address info */
6230 		if (from && from->sa_len) {
6231 			*psa = sodupsockaddr(from, M_NOWAIT);
6232 		} else {
6233 			*psa = NULL;
6234 		}
6235 	}
6236 	return (error);
6237 }
6238 
6239 
6240 
6241 
6242 
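/*
 * Walk a packed list of sockaddrs (the connectx() helper path) and add each
 * one as a confirmed remote address of the association.  On an invalid
 * address the association is freed and *error is set; the number of
 * addresses actually added is returned.
 */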
6243 int
6244 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6245     int totaddr, int *error)
6246 {
6247 	int added = 0;
6248 	int i;
6249 	struct sctp_inpcb *inp;
6250 	struct sockaddr *sa;
6251 	size_t incr = 0;
6252 #ifdef INET
6253 	struct sockaddr_in *sin;
6254 #endif
6255 #ifdef INET6
6256 	struct sockaddr_in6 *sin6;
6257 #endif
6258 
6259 	sa = addr;
6260 	inp = stcb->sctp_ep;
6261 	*error = 0;
6262 	for (i = 0; i < totaddr; i++) {
6263 		switch (sa->sa_family) {
6264 #ifdef INET
6265 		case AF_INET:
6266 			incr = sizeof(struct sockaddr_in);
6267 			sin = (struct sockaddr_in *)sa;
6268 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6269 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6270 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6271 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6272 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6273 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
6274 				*error = EINVAL;
6275 				goto out_now;
6276 			}
6277 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6278 			    SCTP_DONOT_SETSCOPE,
6279 			    SCTP_ADDR_IS_CONFIRMED)) {
6280 				/* assoc is gone, no unlock needed */
6281 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6282 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6283 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
6284 				*error = ENOBUFS;
6285 				goto out_now;
6286 			}
6287 			added++;
6288 			break;
6289 #endif
6290 #ifdef INET6
6291 		case AF_INET6:
6292 			incr = sizeof(struct sockaddr_in6);
6293 			sin6 = (struct sockaddr_in6 *)sa;
6294 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6295 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6296 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6297 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6298 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
6299 				*error = EINVAL;
6300 				goto out_now;
6301 			}
6302 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6303 			    SCTP_DONOT_SETSCOPE,
6304 			    SCTP_ADDR_IS_CONFIRMED)) {
6305 				/* assoc is gone, no unlock needed */
6306 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6307 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6308 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
6309 				*error = ENOBUFS;
6310 				goto out_now;
6311 			}
6312 			added++;
6313 			break;
6314 #endif
6315 		default:
6316 			break;
6317 		}
6318 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6319 	}
6320 out_now:
6321 	return (added);
6322 }
6323 
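/*
 * Validate and count a packed list of sockaddrs for the connectx() path:
 * tally IPv4 and IPv6 entries, reject v4-mapped or wrong-length addresses,
 * and return any existing association already set up towards one of the
 * addresses (NULL if none is found).
 */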
6324 struct sctp_tcb *
6325 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6326     unsigned int *totaddr,
6327     unsigned int *num_v4, unsigned int *num_v6, int *error,
6328     unsigned int limit, int *bad_addr)
6329 {
6330 	struct sockaddr *sa;
6331 	struct sctp_tcb *stcb = NULL;
6332 	unsigned int incr, at, i;
6333 
6334 	at = 0;
6335 	sa = addr;
6336 	*error = *num_v6 = *num_v4 = 0;
6337 	/* account and validate addresses */
6338 	for (i = 0; i < *totaddr; i++) {
6339 		switch (sa->sa_family) {
6340 #ifdef INET
6341 		case AF_INET:
6342 			incr = (unsigned int)sizeof(struct sockaddr_in);
6343 			if (sa->sa_len != incr) {
6344 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6345 				*error = EINVAL;
6346 				*bad_addr = 1;
6347 				return (NULL);
6348 			}
6349 			(*num_v4) += 1;
6350 			break;
6351 #endif
6352 #ifdef INET6
6353 		case AF_INET6:
6354 			{
6355 				struct sockaddr_in6 *sin6;
6356 
6357 				sin6 = (struct sockaddr_in6 *)sa;
6358 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6359 					/* Must be non-mapped for connectx */
6360 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6361 					*error = EINVAL;
6362 					*bad_addr = 1;
6363 					return (NULL);
6364 				}
6365 				incr = (unsigned int)sizeof(struct sockaddr_in6);
6366 				if (sa->sa_len != incr) {
6367 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6368 					*error = EINVAL;
6369 					*bad_addr = 1;
6370 					return (NULL);
6371 				}
6372 				(*num_v6) += 1;
6373 				break;
6374 			}
6375 #endif
6376 		default:
6377 			*totaddr = i;
6378 			incr = 0;
6379 			/* we are done */
6380 			break;
6381 		}
6382 		if (i == *totaddr) {
6383 			break;
6384 		}
6385 		SCTP_INP_INCR_REF(inp);
6386 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6387 		if (stcb != NULL) {
6388 			/* Already have or am bringing up an association */
6389 			return (stcb);
6390 		} else {
6391 			SCTP_INP_DECR_REF(inp);
6392 		}
6393 		if ((at + incr) > limit) {
6394 			*totaddr = i;
6395 			break;
6396 		}
6397 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6398 	}
6399 	return ((struct sctp_tcb *)NULL);
6400 }
6401 
6402 /*
6403  * sctp_bindx(ADD) for one address.
6404  * assumes all arguments are valid/checked by caller.
6405  */
6406 void
6407 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6408     struct sockaddr *sa, sctp_assoc_t assoc_id,
6409     uint32_t vrf_id, int *error, void *p)
6410 {
6411 	struct sockaddr *addr_touse;
6412 #if defined(INET) && defined(INET6)
6413 	struct sockaddr_in sin;
6414 #endif
6415 
6416 	/* see if we're bound all already! */
6417 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6418 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6419 		*error = EINVAL;
6420 		return;
6421 	}
6422 	addr_touse = sa;
6423 #ifdef INET6
6424 	if (sa->sa_family == AF_INET6) {
6425 #ifdef INET
6426 		struct sockaddr_in6 *sin6;
6427 
6428 #endif
6429 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6430 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6431 			*error = EINVAL;
6432 			return;
6433 		}
6434 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6435 			/* can only bind v6 on PF_INET6 sockets */
6436 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6437 			*error = EINVAL;
6438 			return;
6439 		}
6440 #ifdef INET
6441 		sin6 = (struct sockaddr_in6 *)addr_touse;
6442 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6443 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6444 			    SCTP_IPV6_V6ONLY(inp)) {
6445 				/* can't bind v4-mapped addrs on PF_INET6 sockets with IPV6_V6ONLY */
6446 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6447 				*error = EINVAL;
6448 				return;
6449 			}
6450 			in6_sin6_2_sin(&sin, sin6);
6451 			addr_touse = (struct sockaddr *)&sin;
6452 		}
6453 #endif
6454 	}
6455 #endif
6456 #ifdef INET
6457 	if (sa->sa_family == AF_INET) {
6458 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6459 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6460 			*error = EINVAL;
6461 			return;
6462 		}
6463 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6464 		    SCTP_IPV6_V6ONLY(inp)) {
6465 			/* can't bind v4 addrs on PF_INET6 sockets with IPV6_V6ONLY */
6466 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6467 			*error = EINVAL;
6468 			return;
6469 		}
6470 	}
6471 #endif
6472 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6473 		if (p == NULL) {
6474 			/* Can't get proc for Net/Open BSD */
6475 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6476 			*error = EINVAL;
6477 			return;
6478 		}
6479 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6480 		return;
6481 	}
6482 	/*
6483 	 * No locks required here since bind and mgmt_ep_sa all do their own
6484 	 * locking. If we do something for the FIX: below we may need to
6485 	 * lock in that case.
6486 	 */
6487 	if (assoc_id == 0) {
6488 		/* add the address */
6489 		struct sctp_inpcb *lep;
6490 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6491 
6492 		/* validate the incoming port */
6493 		if ((lsin->sin_port != 0) &&
6494 		    (lsin->sin_port != inp->sctp_lport)) {
6495 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6496 			*error = EINVAL;
6497 			return;
6498 		} else {
6499 			/* user specified 0 port, set it to existing port */
6500 			lsin->sin_port = inp->sctp_lport;
6501 		}
6502 
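		/*
		 * See if some other endpoint is already bound to this
		 * address/port; sctp_pcb_findep() returns it with an extra
		 * reference, which we drop right away since we only needed
		 * the comparison.
		 */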
6503 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6504 		if (lep != NULL) {
6505 			/*
6506 			 * We must decrement the refcount since we have the
6507 			 * ep already and are binding. No remove going on
6508 			 * here.
6509 			 */
6510 			SCTP_INP_DECR_REF(lep);
6511 		}
6512 		if (lep == inp) {
6513 			/* already bound to it.. ok */
6514 			return;
6515 		} else if (lep == NULL) {
6516 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6517 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6518 			    SCTP_ADD_IP_ADDRESS,
6519 			    vrf_id, NULL);
6520 		} else {
6521 			*error = EADDRINUSE;
6522 		}
6523 		if (*error)
6524 			return;
6525 	} else {
6526 		/*
6527 		 * FIX: decide whether we allow assoc based bindx
6528 		 */
6529 	}
6530 }
6531 
6532 /*
6533  * sctp_bindx(DELETE) for one address.
6534  * assumes all arguments are valid/checked by caller.
6535  */
6536 void
6537 sctp_bindx_delete_address(struct sctp_inpcb *inp,
6538     struct sockaddr *sa, sctp_assoc_t assoc_id,
6539     uint32_t vrf_id, int *error)
6540 {
6541 	struct sockaddr *addr_touse;
6542 #if defined(INET) && defined(INET6)
6543 	struct sockaddr_in sin;
6544 #endif
6545 
6546 	/* see if we're bound all already! */
6547 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6548 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6549 		*error = EINVAL;
6550 		return;
6551 	}
6552 	addr_touse = sa;
6553 #ifdef INET6
6554 	if (sa->sa_family == AF_INET6) {
6555 #ifdef INET
6556 		struct sockaddr_in6 *sin6;
6557 #endif
6558 
6559 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6560 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6561 			*error = EINVAL;
6562 			return;
6563 		}
6564 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6565 			/* can only bind v6 on PF_INET6 sockets */
6566 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6567 			*error = EINVAL;
6568 			return;
6569 		}
6570 #ifdef INET
6571 		sin6 = (struct sockaddr_in6 *)addr_touse;
6572 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6573 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6574 			    SCTP_IPV6_V6ONLY(inp)) {
6575 				/* can't bind v4-mapped addrs on PF_INET6 sockets with IPV6_V6ONLY */
6576 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6577 				*error = EINVAL;
6578 				return;
6579 			}
6580 			in6_sin6_2_sin(&sin, sin6);
6581 			addr_touse = (struct sockaddr *)&sin;
6582 		}
6583 #endif
6584 	}
6585 #endif
6586 #ifdef INET
6587 	if (sa->sa_family == AF_INET) {
6588 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6589 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6590 			*error = EINVAL;
6591 			return;
6592 		}
6593 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6594 		    SCTP_IPV6_V6ONLY(inp)) {
6595 			/* can't bind v4 addrs on PF_INET6 sockets with IPV6_V6ONLY */
6596 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6597 			*error = EINVAL;
6598 			return;
6599 		}
6600 	}
6601 #endif
6602 	/*
6603 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6604 	 * below is ever changed we may need to lock before calling
6605 	 * association level binding.
6606 	 */
6607 	if (assoc_id == 0) {
6608 		/* delete the address */
6609 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6610 		    SCTP_DEL_IP_ADDRESS,
6611 		    vrf_id, NULL);
6612 	} else {
6613 		/*
6614 		 * FIX: decide whether we allow assoc based bindx
6615 		 */
6616 	}
6617 }
6618 
6619 /*
6620  * returns the valid local address count for an assoc, taking into account
6621  * all scoping rules
6622  */
6623 int
6624 sctp_local_addr_count(struct sctp_tcb *stcb)
6625 {
6626 	int loopback_scope;
6627 #if defined(INET)
6628 	int ipv4_local_scope, ipv4_addr_legal;
6629 #endif
6630 #if defined (INET6)
6631 	int local_scope, site_scope, ipv6_addr_legal;
6632 #endif
6633 	struct sctp_vrf *vrf;
6634 	struct sctp_ifn *sctp_ifn;
6635 	struct sctp_ifa *sctp_ifa;
6636 	int count = 0;
6637 
6638 	/* Turn on all the appropriate scopes */
6639 	loopback_scope = stcb->asoc.scope.loopback_scope;
6640 #if defined(INET)
6641 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
6642 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
6643 #endif
6644 #if defined(INET6)
6645 	local_scope = stcb->asoc.scope.local_scope;
6646 	site_scope = stcb->asoc.scope.site_scope;
6647 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
6648 #endif
6649 	SCTP_IPI_ADDR_RLOCK();
6650 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6651 	if (vrf == NULL) {
6652 		/* no vrf, no addresses */
6653 		SCTP_IPI_ADDR_RUNLOCK();
6654 		return (0);
6655 	}
6656 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6657 		/*
6658 		 * bound all case: go through all ifns on the vrf
6659 		 */
6660 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6661 			if ((loopback_scope == 0) &&
6662 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6663 				continue;
6664 			}
6665 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6666 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6667 					continue;
6668 				switch (sctp_ifa->address.sa.sa_family) {
6669 #ifdef INET
6670 				case AF_INET:
6671 					if (ipv4_addr_legal) {
6672 						struct sockaddr_in *sin;
6673 
6674 						sin = &sctp_ifa->address.sin;
6675 						if (sin->sin_addr.s_addr == 0) {
6676 							/*
6677 							 * skip unspecified
6678 							 * addrs
6679 							 */
6680 							continue;
6681 						}
6682 						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
6683 						    &sin->sin_addr) != 0) {
6684 							continue;
6685 						}
6686 						if ((ipv4_local_scope == 0) &&
6687 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6688 							continue;
6689 						}
6690 						/* count this one */
6691 						count++;
6692 					} else {
6693 						continue;
6694 					}
6695 					break;
6696 #endif
6697 #ifdef INET6
6698 				case AF_INET6:
6699 					if (ipv6_addr_legal) {
6700 						struct sockaddr_in6 *sin6;
6701 
6702 						sin6 = &sctp_ifa->address.sin6;
6703 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6704 							continue;
6705 						}
6706 						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
6707 						    &sin6->sin6_addr) != 0) {
6708 							continue;
6709 						}
6710 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6711 							if (local_scope == 0)
6712 								continue;
6713 							if (sin6->sin6_scope_id == 0) {
6714 								if (sa6_recoverscope(sin6) != 0)
6715 									/* bad link local address */
6724 									continue;
6725 							}
6726 						}
6727 						if ((site_scope == 0) &&
6728 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6729 							continue;
6730 						}
6731 						/* count this one */
6732 						count++;
6733 					}
6734 					break;
6735 #endif
6736 				default:
6737 					/* TSNH */
6738 					break;
6739 				}
6740 			}
6741 		}
6742 	} else {
6743 		/*
6744 		 * subset bound case
6745 		 */
6746 		struct sctp_laddr *laddr;
6747 
6748 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6749 		    sctp_nxt_addr) {
6750 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6751 				continue;
6752 			}
6753 			/* count this one */
6754 			count++;
6755 		}
6756 	}
6757 	SCTP_IPI_ADDR_RUNLOCK();
6758 	return (count);
6759 }
6760 
6761 #if defined(SCTP_LOCAL_TRACE_BUF)
6762 
6763 void
6764 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6765 {
6766 	uint32_t saveindex, newindex;
6767 
6768 	do {
6769 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6770 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6771 			newindex = 1;
6772 		} else {
6773 			newindex = saveindex + 1;
6774 		}
6775 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6776 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6777 		saveindex = 0;
6778 	}
6779 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6780 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6781 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6782 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6783 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6784 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6785 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6786 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6787 }
6788 
6789 #endif
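/*
 * Receive handler for UDP-encapsulated SCTP packets (RFC 6951): record the
 * source UDP port, strip the UDP header and feed the packet to the normal
 * SCTP input path.
 */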
6790 static void
6791 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
6792     const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
6793 {
6794 	struct ip *iph;
6795 #ifdef INET6
6796 	struct ip6_hdr *ip6;
6797 #endif
6798 	struct mbuf *sp, *last;
6799 	struct udphdr *uhdr;
6800 	uint16_t port;
6801 
6802 	if ((m->m_flags & M_PKTHDR) == 0) {
6803 		/* Can't handle one that is not a pkt hdr */
6804 		goto out;
6805 	}
6806 	/* Pull the src port */
6807 	iph = mtod(m, struct ip *);
6808 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6809 	port = uhdr->uh_sport;
6810 	/*
6811 	 * Split out the mbuf chain. Leave the IP header in m, place the
6812 	 * rest in the sp.
6813 	 */
6814 	sp = m_split(m, off, M_NOWAIT);
6815 	if (sp == NULL) {
6816 		/* Gak, drop packet, we can't do a split */
6817 		goto out;
6818 	}
6819 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
6820 		/* Gak, packet can't have an SCTP header in it - too small */
6821 		m_freem(sp);
6822 		goto out;
6823 	}
6824 	/* Now pull up the UDP header and SCTP header together */
6825 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
6826 	if (sp == NULL) {
6827 		/* Gak pullup failed */
6828 		goto out;
6829 	}
6830 	/* Trim out the UDP header */
6831 	m_adj(sp, sizeof(struct udphdr));
6832 
6833 	/* Now reconstruct the mbuf chain */
6834 	for (last = m; last->m_next; last = last->m_next);
6835 	last->m_next = sp;
6836 	m->m_pkthdr.len += sp->m_pkthdr.len;
6837 	/*
6838 	 * The CSUM_DATA_VALID flags indicates that the HW checked the UDP
6839 	 * checksum and it was valid. Since CSUM_DATA_VALID ==
6840 	 * CSUM_SCTP_VALID this would imply that the HW also verified the
6841 	 * SCTP checksum. Therefore, clear the bit.
6842 	 */
6843 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6844 	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
6845 	    m->m_pkthdr.len,
6846 	    if_name(m->m_pkthdr.rcvif),
6847 	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
6848 	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
6849 	iph = mtod(m, struct ip *);
6850 	switch (iph->ip_v) {
6851 #ifdef INET
6852 	case IPVERSION:
6853 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
6854 		sctp_input_with_port(m, off, port);
6855 		break;
6856 #endif
6857 #ifdef INET6
6858 	case IPV6_VERSION >> 4:
6859 		ip6 = mtod(m, struct ip6_hdr *);
6860 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
6861 		sctp6_input_with_port(&m, &off, port);
6862 		break;
6863 #endif
6864 	default:
6865 		goto out;
6866 		break;
6867 	}
6868 	return;
6869 out:
6870 	m_freem(m);
6871 }
6872 
6873 #ifdef INET
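/*
 * ICMP error handler for UDP-encapsulated SCTP: only act on errors whose
 * quoted packet can be matched to one of our associations (the UDP ports
 * and the verification tag are checked) before handing off to sctp_notify().
 */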
6874 static void
6875 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
6876 {
6877 	struct ip *outer_ip, *inner_ip;
6878 	struct sctphdr *sh;
6879 	struct icmp *icmp;
6880 	struct udphdr *udp;
6881 	struct sctp_inpcb *inp;
6882 	struct sctp_tcb *stcb;
6883 	struct sctp_nets *net;
6884 	struct sctp_init_chunk *ch;
6885 	struct sockaddr_in src, dst;
6886 	uint8_t type, code;
6887 
6888 	inner_ip = (struct ip *)vip;
6889 	icmp = (struct icmp *)((caddr_t)inner_ip -
6890 	    (sizeof(struct icmp) - sizeof(struct ip)));
6891 	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
6892 	if (ntohs(outer_ip->ip_len) <
6893 	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
6894 		return;
6895 	}
6896 	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
6897 	sh = (struct sctphdr *)(udp + 1);
6898 	memset(&src, 0, sizeof(struct sockaddr_in));
6899 	src.sin_family = AF_INET;
6900 	src.sin_len = sizeof(struct sockaddr_in);
6901 	src.sin_port = sh->src_port;
6902 	src.sin_addr = inner_ip->ip_src;
6903 	memset(&dst, 0, sizeof(struct sockaddr_in));
6904 	dst.sin_family = AF_INET;
6905 	dst.sin_len = sizeof(struct sockaddr_in);
6906 	dst.sin_port = sh->dest_port;
6907 	dst.sin_addr = inner_ip->ip_dst;
6908 	/*
6909 	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
6910 	 * holds our local endpoint address. Thus we reverse the dst and the
6911 	 * src in the lookup.
6912 	 */
6913 	inp = NULL;
6914 	net = NULL;
6915 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
6916 	    (struct sockaddr *)&src,
6917 	    &inp, &net, 1,
6918 	    SCTP_DEFAULT_VRFID);
6919 	if ((stcb != NULL) &&
6920 	    (net != NULL) &&
6921 	    (inp != NULL)) {
6922 		/* Check the UDP port numbers */
6923 		if ((udp->uh_dport != net->port) ||
6924 		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
6925 			SCTP_TCB_UNLOCK(stcb);
6926 			return;
6927 		}
6928 		/* Check the verification tag */
6929 		if (ntohl(sh->v_tag) != 0) {
6930 			/*
6931 			 * This must be the verification tag used for
6932 			 * sending out packets. We don't consider packets
6933 			 * reflecting the verification tag.
6934 			 */
6935 			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
6936 				SCTP_TCB_UNLOCK(stcb);
6937 				return;
6938 			}
6939 		} else {
6940 			if (ntohs(outer_ip->ip_len) >=
6941 			    sizeof(struct ip) +
6942 			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
6943 				/*
6944 				 * In this case we can check if we got an
6945 				 * INIT chunk and if the initiate tag
6946 				 * matches.
6947 				 */
6948 				ch = (struct sctp_init_chunk *)(sh + 1);
6949 				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
6950 				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
6951 					SCTP_TCB_UNLOCK(stcb);
6952 					return;
6953 				}
6954 			} else {
6955 				SCTP_TCB_UNLOCK(stcb);
6956 				return;
6957 			}
6958 		}
6959 		type = icmp->icmp_type;
6960 		code = icmp->icmp_code;
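		/*
		 * A port unreachable for the tunneling port corresponds to
		 * a protocol unreachable for a plainly sent SCTP packet, so
		 * report it to sctp_notify() as such.
		 */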
6961 		if ((type == ICMP_UNREACH) &&
6962 		    (code == ICMP_UNREACH_PORT)) {
6963 			code = ICMP_UNREACH_PROTOCOL;
6964 		}
6965 		sctp_notify(inp, stcb, net, type, code,
6966 		    ntohs(inner_ip->ip_len),
6967 		    (uint32_t)ntohs(icmp->icmp_nextmtu));
6968 	} else {
6969 		if ((stcb == NULL) && (inp != NULL)) {
6970 			/* reduce ref-count */
6971 			SCTP_INP_WLOCK(inp);
6972 			SCTP_INP_DECR_REF(inp);
6973 			SCTP_INP_WUNLOCK(inp);
6974 		}
6975 		if (stcb) {
6976 			SCTP_TCB_UNLOCK(stcb);
6977 		}
6978 	}
6979 	return;
6980 }
6981 #endif
6982 
6983 #ifdef INET6
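/*
 * ICMPv6 error handler for SCTP packets tunneled over UDP (IPv6).
 * Works like sctp_recv_icmp_tunneled_packet(), but since the quoted packet
 * may be truncated, the UDP header, the SCTP ports, the verification tag
 * and (if needed) parts of a quoted INIT chunk are copied out of the mbuf
 * chain with m_copydata() before they are examined.
 */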
6984 static void
6985 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
6986 {
6987 	struct ip6ctlparam *ip6cp;
6988 	struct sctp_inpcb *inp;
6989 	struct sctp_tcb *stcb;
6990 	struct sctp_nets *net;
6991 	struct sctphdr sh;
6992 	struct udphdr udp;
6993 	struct sockaddr_in6 src, dst;
6994 	uint8_t type, code;
6995 
6996 	ip6cp = (struct ip6ctlparam *)d;
6997 	/*
6998 	 * XXX: We assume that when ip6c_ip6 is non-NULL, ip6c_m and ip6c_off are valid.
6999 	 */
7000 	if (ip6cp->ip6c_m == NULL) {
7001 		return;
7002 	}
7003 	/*
7004 	 * Check if we can safely examine the ports and the verification tag
7005 	 * of the SCTP common header.
7006 	 */
7007 	if (ip6cp->ip6c_m->m_pkthdr.len <
7008 	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
7009 		return;
7010 	}
7011 	/* Copy out the UDP header. */
7012 	memset(&udp, 0, sizeof(struct udphdr));
7013 	m_copydata(ip6cp->ip6c_m,
7014 	    ip6cp->ip6c_off,
7015 	    sizeof(struct udphdr),
7016 	    (caddr_t)&udp);
7017 	/* Copy out the port numbers and the verification tag. */
7018 	memset(&sh, 0, sizeof(struct sctphdr));
7019 	m_copydata(ip6cp->ip6c_m,
7020 	    ip6cp->ip6c_off + sizeof(struct udphdr),
7021 	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
7022 	    (caddr_t)&sh);
7023 	memset(&src, 0, sizeof(struct sockaddr_in6));
7024 	src.sin6_family = AF_INET6;
7025 	src.sin6_len = sizeof(struct sockaddr_in6);
7026 	src.sin6_port = sh.src_port;
7027 	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
7028 	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7029 		return;
7030 	}
7031 	memset(&dst, 0, sizeof(struct sockaddr_in6));
7032 	dst.sin6_family = AF_INET6;
7033 	dst.sin6_len = sizeof(struct sockaddr_in6);
7034 	dst.sin6_port = sh.dest_port;
7035 	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
7036 	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7037 		return;
7038 	}
7039 	inp = NULL;
7040 	net = NULL;
7041 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7042 	    (struct sockaddr *)&src,
7043 	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
7044 	if ((stcb != NULL) &&
7045 	    (net != NULL) &&
7046 	    (inp != NULL)) {
7047 		/* Check the UDP port numbers */
7048 		if ((udp.uh_dport != net->port) ||
7049 		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7050 			SCTP_TCB_UNLOCK(stcb);
7051 			return;
7052 		}
7053 		/* Check the verification tag */
7054 		if (ntohl(sh.v_tag) != 0) {
7055 			/*
7056 			 * This must be the verification tag used for
7057 			 * sending out packets. We don't consider packets
7058 			 * reflecting the verification tag.
7059 			 */
7060 			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
7061 				SCTP_TCB_UNLOCK(stcb);
7062 				return;
7063 			}
7064 		} else {
7065 			if (ip6cp->ip6c_m->m_pkthdr.len >=
7066 			    ip6cp->ip6c_off + sizeof(struct udphdr) +
7067 			    sizeof(struct sctphdr) +
7068 			    sizeof(struct sctp_chunkhdr) +
7069 			    offsetof(struct sctp_init, a_rwnd)) {
7070 				/*
7071 				 * In this case we can check if we got an
7072 				 * INIT chunk and if the initiate tag
7073 				 * matches.
7074 				 */
7075 				uint32_t initiate_tag;
7076 				uint8_t chunk_type;
7077 
7078 				m_copydata(ip6cp->ip6c_m,
7079 				    ip6cp->ip6c_off +
7080 				    sizeof(struct udphdr) +
7081 				    sizeof(struct sctphdr),
7082 				    sizeof(uint8_t),
7083 				    (caddr_t)&chunk_type);
7084 				m_copydata(ip6cp->ip6c_m,
7085 				    ip6cp->ip6c_off +
7086 				    sizeof(struct udphdr) +
7087 				    sizeof(struct sctphdr) +
7088 				    sizeof(struct sctp_chunkhdr),
7089 				    sizeof(uint32_t),
7090 				    (caddr_t)&initiate_tag);
7091 				if ((chunk_type != SCTP_INITIATION) ||
7092 				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
7093 					SCTP_TCB_UNLOCK(stcb);
7094 					return;
7095 				}
7096 			} else {
7097 				SCTP_TCB_UNLOCK(stcb);
7098 				return;
7099 			}
7100 		}
7101 		type = ip6cp->ip6c_icmp6->icmp6_type;
7102 		code = ip6cp->ip6c_icmp6->icmp6_code;
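		/*
		 * A port unreachable for the tunneling port corresponds to
		 * an unrecognized next header (parameter problem) for a
		 * plainly sent SCTP packet, so report it to sctp6_notify()
		 * as such.
		 */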
7103 		if ((type == ICMP6_DST_UNREACH) &&
7104 		    (code == ICMP6_DST_UNREACH_NOPORT)) {
7105 			type = ICMP6_PARAM_PROB;
7106 			code = ICMP6_PARAMPROB_NEXTHEADER;
7107 		}
7108 		sctp6_notify(inp, stcb, net, type, code,
7109 		    ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
7110 	} else {
7111 		if ((stcb == NULL) && (inp != NULL)) {
7112 			/* reduce inp's ref-count */
7113 			SCTP_INP_WLOCK(inp);
7114 			SCTP_INP_DECR_REF(inp);
7115 			SCTP_INP_WUNLOCK(inp);
7116 		}
7117 		if (stcb) {
7118 			SCTP_TCB_UNLOCK(stcb);
7119 		}
7120 	}
7121 }
7122 #endif
7123 
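/*
 * Tear down SCTP over UDP encapsulation: close the kernel UDP tunneling
 * socket(s) created by sctp_over_udp_start(), if any.
 */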
7124 void
7125 sctp_over_udp_stop(void)
7126 {
7127 	/*
7128 	 * This function assumes that the sysctl caller holds
7129 	 * sctp_sysctl_info_lock() for writing.
7130 	 */
7131 #ifdef INET
7132 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7133 		soclose(SCTP_BASE_INFO(udp4_tun_socket));
7134 		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
7135 	}
7136 #endif
7137 #ifdef INET6
7138 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7139 		soclose(SCTP_BASE_INFO(udp6_tun_socket));
7140 		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
7141 	}
7142 #endif
7143 }
7144 
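/*
 * Set up SCTP over UDP encapsulation: create a kernel UDP socket per
 * address family, register the tunneling and ICMP callbacks via
 * udp_set_kernel_tunneling(), and bind each socket to the configured
 * tunneling port.
 */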
7145 int
7146 sctp_over_udp_start(void)
7147 {
7148 	uint16_t port;
7149 	int ret;
7150 #ifdef INET
7151 	struct sockaddr_in sin;
7152 #endif
7153 #ifdef INET6
7154 	struct sockaddr_in6 sin6;
7155 #endif
7156 	/*
7157 	 * This function assumes that the sysctl caller holds
7158 	 * sctp_sysctl_info_lock() for writing.
7159 	 */
7160 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7161 	if (port == 0) {
7162 		/* Must have a port set */
7163 		return (EINVAL);
7164 	}
7165 #ifdef INET
7166 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7167 		/* Already running -- must stop first */
7168 		return (EALREADY);
7169 	}
7170 #endif
7171 #ifdef INET6
7172 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7173 		/* Already running -- must stop first */
7174 		return (EALREADY);
7175 	}
7176 #endif
7177 #ifdef INET
7178 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7179 	    SOCK_DGRAM, IPPROTO_UDP,
7180 	    curthread->td_ucred, curthread))) {
7181 		sctp_over_udp_stop();
7182 		return (ret);
7183 	}
7184 	/* Call the special UDP hook. */
7185 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7186 	    sctp_recv_udp_tunneled_packet,
7187 	    sctp_recv_icmp_tunneled_packet,
7188 	    NULL))) {
7189 		sctp_over_udp_stop();
7190 		return (ret);
7191 	}
7192 	/* Ok, we have a socket, bind it to the port. */
7193 	memset(&sin, 0, sizeof(struct sockaddr_in));
7194 	sin.sin_len = sizeof(struct sockaddr_in);
7195 	sin.sin_family = AF_INET;
7196 	sin.sin_port = htons(port);
7197 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7198 	    (struct sockaddr *)&sin, curthread))) {
7199 		sctp_over_udp_stop();
7200 		return (ret);
7201 	}
7202 #endif
7203 #ifdef INET6
7204 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7205 	    SOCK_DGRAM, IPPROTO_UDP,
7206 	    curthread->td_ucred, curthread))) {
7207 		sctp_over_udp_stop();
7208 		return (ret);
7209 	}
7210 	/* Call the special UDP hook. */
7211 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7212 	    sctp_recv_udp_tunneled_packet,
7213 	    sctp_recv_icmp6_tunneled_packet,
7214 	    NULL))) {
7215 		sctp_over_udp_stop();
7216 		return (ret);
7217 	}
7218 	/* Ok, we have a socket, bind it to the port. */
7219 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7220 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7221 	sin6.sin6_family = AF_INET6;
7222 	sin6.sin6_port = htons(port);
7223 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7224 	    (struct sockaddr *)&sin6, curthread))) {
7225 		sctp_over_udp_stop();
7226 		return (ret);
7227 	}
7228 #endif
7229 	return (0);
7230 }
7231 
7232 /*
7233  * sctp_min_mtu() returns the minimum of all non-zero arguments.
7234  * If all arguments are zero, zero is returned.
7235  */
7236 uint32_t
7237 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
7238 {
7239 	if (mtu1 > 0) {
7240 		if (mtu2 > 0) {
7241 			if (mtu3 > 0) {
7242 				return (min(mtu1, min(mtu2, mtu3)));
7243 			} else {
7244 				return (min(mtu1, mtu2));
7245 			}
7246 		} else {
7247 			if (mtu3 > 0) {
7248 				return (min(mtu1, mtu3));
7249 			} else {
7250 				return (mtu1);
7251 			}
7252 		}
7253 	} else {
7254 		if (mtu2 > 0) {
7255 			if (mtu3 > 0) {
7256 				return (min(mtu2, mtu3));
7257 			} else {
7258 				return (mtu2);
7259 			}
7260 		} else {
7261 			return (mtu3);
7262 		}
7263 	}
7264 }
7265 
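/*
 * Store the path MTU for the given peer address and FIB in the host cache
 * shared with TCP (tcp_hc_updatemtu()).
 */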
7266 void
7267 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
7268 {
7269 	struct in_conninfo inc;
7270 
7271 	memset(&inc, 0, sizeof(struct in_conninfo));
7272 	inc.inc_fibnum = fibnum;
7273 	switch (addr->sa.sa_family) {
7274 #ifdef INET
7275 	case AF_INET:
7276 		inc.inc_faddr = addr->sin.sin_addr;
7277 		break;
7278 #endif
7279 #ifdef INET6
7280 	case AF_INET6:
7281 		inc.inc_flags |= INC_ISIPV6;
7282 		inc.inc6_faddr = addr->sin6.sin6_addr;
7283 		break;
7284 #endif
7285 	default:
7286 		return;
7287 	}
7288 	tcp_hc_updatemtu(&inc, (u_long)mtu);
7289 }
7290 
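/*
 * Look up the path MTU for the given peer address and FIB in the host cache
 * shared with TCP; returns 0 if the address family is not supported or the
 * cache has no entry.
 */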
7291 uint32_t
7292 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
7293 {
7294 	struct in_conninfo inc;
7295 
7296 	memset(&inc, 0, sizeof(struct in_conninfo));
7297 	inc.inc_fibnum = fibnum;
7298 	switch (addr->sa.sa_family) {
7299 #ifdef INET
7300 	case AF_INET:
7301 		inc.inc_faddr = addr->sin.sin_addr;
7302 		break;
7303 #endif
7304 #ifdef INET6
7305 	case AF_INET6:
7306 		inc.inc_flags |= INC_ISIPV6;
7307 		inc.inc6_faddr = addr->sin6.sin6_addr;
7308 		break;
7309 #endif
7310 	default:
7311 		return (0);
7312 	}
7313 	return ((uint32_t)tcp_hc_getmtu(&inc));
7314 }
7315