xref: /freebsd/sys/netinet/sctputil.c (revision a2aef24aa3c8458e4036735dd6928b4ef77294e5)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_indata.h>	/* for sctp_deliver_data() */
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #if defined(INET6) || defined(INET)
53 #include <netinet/tcp_var.h>
54 #endif
55 #include <netinet/udp.h>
56 #include <netinet/udp_var.h>
57 #include <sys/proc.h>
58 #ifdef INET6
59 #include <netinet/icmp6.h>
60 #endif
61 
62 
63 #ifndef KTR_SCTP
64 #define KTR_SCTP KTR_SUBSYS
65 #endif
66 
67 extern const struct sctp_cc_functions sctp_cc_functions[];
68 extern const struct sctp_ss_functions sctp_ss_functions[];
69 
70 void
71 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
72 {
73 	struct sctp_cwnd_log sctp_clog;
74 
75 	sctp_clog.x.sb.stcb = stcb;
76 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
77 	if (stcb)
78 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
79 	else
80 		sctp_clog.x.sb.stcb_sbcc = 0;
81 	sctp_clog.x.sb.incr = incr;
82 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
83 	    SCTP_LOG_EVENT_SB,
84 	    from,
85 	    sctp_clog.x.misc.log1,
86 	    sctp_clog.x.misc.log2,
87 	    sctp_clog.x.misc.log3,
88 	    sctp_clog.x.misc.log4);
89 }
90 
91 void
92 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
93 {
94 	struct sctp_cwnd_log sctp_clog;
95 
96 	sctp_clog.x.close.inp = (void *)inp;
97 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
98 	if (stcb) {
99 		sctp_clog.x.close.stcb = (void *)stcb;
100 		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
101 	} else {
102 		sctp_clog.x.close.stcb = 0;
103 		sctp_clog.x.close.state = 0;
104 	}
105 	sctp_clog.x.close.loc = loc;
106 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
107 	    SCTP_LOG_EVENT_CLOSE,
108 	    0,
109 	    sctp_clog.x.misc.log1,
110 	    sctp_clog.x.misc.log2,
111 	    sctp_clog.x.misc.log3,
112 	    sctp_clog.x.misc.log4);
113 }
114 
115 void
116 rto_logging(struct sctp_nets *net, int from)
117 {
118 	struct sctp_cwnd_log sctp_clog;
119 
120 	memset(&sctp_clog, 0, sizeof(sctp_clog));
121 	sctp_clog.x.rto.net = (void *)net;
122 	sctp_clog.x.rto.rtt = net->rtt / 1000;
123 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
124 	    SCTP_LOG_EVENT_RTT,
125 	    from,
126 	    sctp_clog.x.misc.log1,
127 	    sctp_clog.x.misc.log2,
128 	    sctp_clog.x.misc.log3,
129 	    sctp_clog.x.misc.log4);
130 }
131 
132 void
133 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
134 {
135 	struct sctp_cwnd_log sctp_clog;
136 
137 	sctp_clog.x.strlog.stcb = stcb;
138 	sctp_clog.x.strlog.n_tsn = tsn;
139 	sctp_clog.x.strlog.n_sseq = sseq;
140 	sctp_clog.x.strlog.e_tsn = 0;
141 	sctp_clog.x.strlog.e_sseq = 0;
142 	sctp_clog.x.strlog.strm = stream;
143 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
144 	    SCTP_LOG_EVENT_STRM,
145 	    from,
146 	    sctp_clog.x.misc.log1,
147 	    sctp_clog.x.misc.log2,
148 	    sctp_clog.x.misc.log3,
149 	    sctp_clog.x.misc.log4);
150 }
151 
152 void
153 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
154 {
155 	struct sctp_cwnd_log sctp_clog;
156 
157 	sctp_clog.x.nagle.stcb = (void *)stcb;
158 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
159 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
160 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
161 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
162 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
163 	    SCTP_LOG_EVENT_NAGLE,
164 	    action,
165 	    sctp_clog.x.misc.log1,
166 	    sctp_clog.x.misc.log2,
167 	    sctp_clog.x.misc.log3,
168 	    sctp_clog.x.misc.log4);
169 }
170 
171 void
172 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
173 {
174 	struct sctp_cwnd_log sctp_clog;
175 
176 	sctp_clog.x.sack.cumack = cumack;
177 	sctp_clog.x.sack.oldcumack = old_cumack;
178 	sctp_clog.x.sack.tsn = tsn;
179 	sctp_clog.x.sack.numGaps = gaps;
180 	sctp_clog.x.sack.numDups = dups;
181 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
182 	    SCTP_LOG_EVENT_SACK,
183 	    from,
184 	    sctp_clog.x.misc.log1,
185 	    sctp_clog.x.misc.log2,
186 	    sctp_clog.x.misc.log3,
187 	    sctp_clog.x.misc.log4);
188 }
189 
190 void
191 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
192 {
193 	struct sctp_cwnd_log sctp_clog;
194 
195 	memset(&sctp_clog, 0, sizeof(sctp_clog));
196 	sctp_clog.x.map.base = map;
197 	sctp_clog.x.map.cum = cum;
198 	sctp_clog.x.map.high = high;
199 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
200 	    SCTP_LOG_EVENT_MAP,
201 	    from,
202 	    sctp_clog.x.misc.log1,
203 	    sctp_clog.x.misc.log2,
204 	    sctp_clog.x.misc.log3,
205 	    sctp_clog.x.misc.log4);
206 }
207 
208 void
209 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
210 {
211 	struct sctp_cwnd_log sctp_clog;
212 
213 	memset(&sctp_clog, 0, sizeof(sctp_clog));
214 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
215 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
216 	sctp_clog.x.fr.tsn = tsn;
217 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
218 	    SCTP_LOG_EVENT_FR,
219 	    from,
220 	    sctp_clog.x.misc.log1,
221 	    sctp_clog.x.misc.log2,
222 	    sctp_clog.x.misc.log3,
223 	    sctp_clog.x.misc.log4);
224 }
225 
226 #ifdef SCTP_MBUF_LOGGING
227 void
228 sctp_log_mb(struct mbuf *m, int from)
229 {
230 	struct sctp_cwnd_log sctp_clog;
231 
232 	sctp_clog.x.mb.mp = m;
233 	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
234 	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
235 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
236 	if (SCTP_BUF_IS_EXTENDED(m)) {
237 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
238 		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
239 	} else {
240 		sctp_clog.x.mb.ext = 0;
241 		sctp_clog.x.mb.refcnt = 0;
242 	}
243 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
244 	    SCTP_LOG_EVENT_MBUF,
245 	    from,
246 	    sctp_clog.x.misc.log1,
247 	    sctp_clog.x.misc.log2,
248 	    sctp_clog.x.misc.log3,
249 	    sctp_clog.x.misc.log4);
250 }
251 
252 void
253 sctp_log_mbc(struct mbuf *m, int from)
254 {
255 	struct mbuf *mat;
256 
257 	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
258 		sctp_log_mb(mat, from);
259 	}
260 }
261 #endif
262 
263 void
264 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
265 {
266 	struct sctp_cwnd_log sctp_clog;
267 
268 	if (control == NULL) {
269 		SCTP_PRINTF("Gak log of NULL?\n");
270 		return;
271 	}
272 	sctp_clog.x.strlog.stcb = control->stcb;
273 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
274 	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
275 	sctp_clog.x.strlog.strm = control->sinfo_stream;
276 	if (poschk != NULL) {
277 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
278 		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
279 	} else {
280 		sctp_clog.x.strlog.e_tsn = 0;
281 		sctp_clog.x.strlog.e_sseq = 0;
282 	}
283 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
284 	    SCTP_LOG_EVENT_STRM,
285 	    from,
286 	    sctp_clog.x.misc.log1,
287 	    sctp_clog.x.misc.log2,
288 	    sctp_clog.x.misc.log3,
289 	    sctp_clog.x.misc.log4);
290 }
291 
292 void
293 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
294 {
295 	struct sctp_cwnd_log sctp_clog;
296 
297 	sctp_clog.x.cwnd.net = net;
298 	if (stcb->asoc.send_queue_cnt > 255)
299 		sctp_clog.x.cwnd.cnt_in_send = 255;
300 	else
301 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
302 	if (stcb->asoc.stream_queue_cnt > 255)
303 		sctp_clog.x.cwnd.cnt_in_str = 255;
304 	else
305 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
306 
307 	if (net) {
308 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
309 		sctp_clog.x.cwnd.inflight = net->flight_size;
310 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
311 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
312 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
313 	}
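	/*
	 * For pre-send logging we reuse the meets_pseudo_cumack field to
	 * record the peer's advertised rwnd.
	 */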
314 	if (SCTP_CWNDLOG_PRESEND == from) {
315 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
316 	}
317 	sctp_clog.x.cwnd.cwnd_augment = augment;
318 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
319 	    SCTP_LOG_EVENT_CWND,
320 	    from,
321 	    sctp_clog.x.misc.log1,
322 	    sctp_clog.x.misc.log2,
323 	    sctp_clog.x.misc.log3,
324 	    sctp_clog.x.misc.log4);
325 }
326 
327 void
328 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
329 {
330 	struct sctp_cwnd_log sctp_clog;
331 
332 	memset(&sctp_clog, 0, sizeof(sctp_clog));
333 	if (inp) {
334 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
335 
336 	} else {
337 		sctp_clog.x.lock.sock = (void *)NULL;
338 	}
339 	sctp_clog.x.lock.inp = (void *)inp;
340 	if (stcb) {
341 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
342 	} else {
343 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
344 	}
345 	if (inp) {
346 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
347 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
348 	} else {
349 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
350 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
351 	}
352 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
353 	if (inp && (inp->sctp_socket)) {
354 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
355 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
356 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
357 	} else {
358 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
359 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
360 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
361 	}
362 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
363 	    SCTP_LOG_LOCK_EVENT,
364 	    from,
365 	    sctp_clog.x.misc.log1,
366 	    sctp_clog.x.misc.log2,
367 	    sctp_clog.x.misc.log3,
368 	    sctp_clog.x.misc.log4);
369 }
370 
371 void
372 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
373 {
374 	struct sctp_cwnd_log sctp_clog;
375 
376 	memset(&sctp_clog, 0, sizeof(sctp_clog));
377 	sctp_clog.x.cwnd.net = net;
378 	sctp_clog.x.cwnd.cwnd_new_value = error;
379 	sctp_clog.x.cwnd.inflight = net->flight_size;
380 	sctp_clog.x.cwnd.cwnd_augment = burst;
381 	if (stcb->asoc.send_queue_cnt > 255)
382 		sctp_clog.x.cwnd.cnt_in_send = 255;
383 	else
384 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
385 	if (stcb->asoc.stream_queue_cnt > 255)
386 		sctp_clog.x.cwnd.cnt_in_str = 255;
387 	else
388 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
389 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
390 	    SCTP_LOG_EVENT_MAXBURST,
391 	    from,
392 	    sctp_clog.x.misc.log1,
393 	    sctp_clog.x.misc.log2,
394 	    sctp_clog.x.misc.log3,
395 	    sctp_clog.x.misc.log4);
396 }
397 
398 void
399 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
400 {
401 	struct sctp_cwnd_log sctp_clog;
402 
403 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
404 	sctp_clog.x.rwnd.send_size = snd_size;
405 	sctp_clog.x.rwnd.overhead = overhead;
406 	sctp_clog.x.rwnd.new_rwnd = 0;
407 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
408 	    SCTP_LOG_EVENT_RWND,
409 	    from,
410 	    sctp_clog.x.misc.log1,
411 	    sctp_clog.x.misc.log2,
412 	    sctp_clog.x.misc.log3,
413 	    sctp_clog.x.misc.log4);
414 }
415 
416 void
417 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
418 {
419 	struct sctp_cwnd_log sctp_clog;
420 
421 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
422 	sctp_clog.x.rwnd.send_size = flight_size;
423 	sctp_clog.x.rwnd.overhead = overhead;
424 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
425 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
426 	    SCTP_LOG_EVENT_RWND,
427 	    from,
428 	    sctp_clog.x.misc.log1,
429 	    sctp_clog.x.misc.log2,
430 	    sctp_clog.x.misc.log3,
431 	    sctp_clog.x.misc.log4);
432 }
433 
434 #ifdef SCTP_MBCNT_LOGGING
435 static void
436 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
437 {
438 	struct sctp_cwnd_log sctp_clog;
439 
440 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
441 	sctp_clog.x.mbcnt.size_change = book;
442 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
443 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
444 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
445 	    SCTP_LOG_EVENT_MBCNT,
446 	    from,
447 	    sctp_clog.x.misc.log1,
448 	    sctp_clog.x.misc.log2,
449 	    sctp_clog.x.misc.log3,
450 	    sctp_clog.x.misc.log4);
451 }
452 #endif
453 
454 void
455 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
456 {
457 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
458 	    SCTP_LOG_MISC_EVENT,
459 	    from,
460 	    a, b, c, d);
461 }
462 
463 void
464 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
465 {
466 	struct sctp_cwnd_log sctp_clog;
467 
468 	sctp_clog.x.wake.stcb = (void *)stcb;
469 	sctp_clog.x.wake.wake_cnt = wake_cnt;
470 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
471 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
472 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
473 
474 	if (stcb->asoc.stream_queue_cnt < 0xff)
475 		sctp_clog.x.wake.stream_qcnt = (uint8_t)stcb->asoc.stream_queue_cnt;
476 	else
477 		sctp_clog.x.wake.stream_qcnt = 0xff;
478 
479 	if (stcb->asoc.chunks_on_out_queue < 0xff)
480 		sctp_clog.x.wake.chunks_on_oque = (uint8_t)stcb->asoc.chunks_on_out_queue;
481 	else
482 		sctp_clog.x.wake.chunks_on_oque = 0xff;
483 
484 	sctp_clog.x.wake.sctpflags = 0;
485 	/* set in the deferred mode stuff */
486 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
487 		sctp_clog.x.wake.sctpflags |= 1;
488 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
489 		sctp_clog.x.wake.sctpflags |= 2;
490 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
491 		sctp_clog.x.wake.sctpflags |= 4;
492 	/* what about the sb */
493 	if (stcb->sctp_socket) {
494 		struct socket *so = stcb->sctp_socket;
495 
496 		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
497 	} else {
498 		sctp_clog.x.wake.sbflags = 0xff;
499 	}
500 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
501 	    SCTP_LOG_EVENT_WAKE,
502 	    from,
503 	    sctp_clog.x.misc.log1,
504 	    sctp_clog.x.misc.log2,
505 	    sctp_clog.x.misc.log3,
506 	    sctp_clog.x.misc.log4);
507 }
508 
509 void
510 sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
511 {
512 	struct sctp_cwnd_log sctp_clog;
513 
514 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
515 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t)(asoc->send_queue_cnt + asoc->sent_queue_cnt);
516 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
517 	sctp_clog.x.blk.stream_qcnt = (uint16_t)asoc->stream_queue_cnt;
518 	sctp_clog.x.blk.chunks_on_oque = (uint16_t)asoc->chunks_on_out_queue;
519 	sctp_clog.x.blk.flight_size = (uint16_t)(asoc->total_flight / 1024);
520 	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
521 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
522 	    SCTP_LOG_EVENT_BLOCK,
523 	    from,
524 	    sctp_clog.x.misc.log1,
525 	    sctp_clog.x.misc.log2,
526 	    sctp_clog.x.misc.log3,
527 	    sctp_clog.x.misc.log4);
528 }
529 
530 int
531 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
532 {
533 	/* May need to fix this if ktrdump does not work */
534 	return (0);
535 }
536 
537 #ifdef SCTP_AUDITING_ENABLED
538 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
539 static int sctp_audit_indx = 0;
540 
541 static
542 void
543 sctp_print_audit_report(void)
544 {
545 	int i;
546 	int cnt;
547 
548 	cnt = 0;
549 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
550 		if ((sctp_audit_data[i][0] == 0xe0) &&
551 		    (sctp_audit_data[i][1] == 0x01)) {
552 			cnt = 0;
553 			SCTP_PRINTF("\n");
554 		} else if (sctp_audit_data[i][0] == 0xf0) {
555 			cnt = 0;
556 			SCTP_PRINTF("\n");
557 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
558 		    (sctp_audit_data[i][1] == 0x01)) {
559 			SCTP_PRINTF("\n");
560 			cnt = 0;
561 		}
562 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
563 		    (uint32_t)sctp_audit_data[i][1]);
564 		cnt++;
565 		if ((cnt % 14) == 0)
566 			SCTP_PRINTF("\n");
567 	}
568 	for (i = 0; i < sctp_audit_indx; i++) {
569 		if ((sctp_audit_data[i][0] == 0xe0) &&
570 		    (sctp_audit_data[i][1] == 0x01)) {
571 			cnt = 0;
572 			SCTP_PRINTF("\n");
573 		} else if (sctp_audit_data[i][0] == 0xf0) {
574 			cnt = 0;
575 			SCTP_PRINTF("\n");
576 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
577 		    (sctp_audit_data[i][1] == 0x01)) {
578 			SCTP_PRINTF("\n");
579 			cnt = 0;
580 		}
581 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t)sctp_audit_data[i][0],
582 		    (uint32_t)sctp_audit_data[i][1]);
583 		cnt++;
584 		if ((cnt % 14) == 0)
585 			SCTP_PRINTF("\n");
586 	}
587 	SCTP_PRINTF("\n");
588 }
589 
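/*
 * Record an audit entry for 'from' and cross-check the retransmit count
 * and flight-size bookkeeping against the sent queue, correcting the
 * counters and printing a report when a mismatch is found.
 */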
590 void
591 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
592     struct sctp_nets *net)
593 {
594 	int resend_cnt, tot_out, rep, tot_book_cnt;
595 	struct sctp_nets *lnet;
596 	struct sctp_tmit_chunk *chk;
597 
598 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
599 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
600 	sctp_audit_indx++;
601 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
602 		sctp_audit_indx = 0;
603 	}
604 	if (inp == NULL) {
605 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
606 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
607 		sctp_audit_indx++;
608 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
609 			sctp_audit_indx = 0;
610 		}
611 		return;
612 	}
613 	if (stcb == NULL) {
614 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
615 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
616 		sctp_audit_indx++;
617 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
618 			sctp_audit_indx = 0;
619 		}
620 		return;
621 	}
622 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
623 	sctp_audit_data[sctp_audit_indx][1] =
624 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
625 	sctp_audit_indx++;
626 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
627 		sctp_audit_indx = 0;
628 	}
629 	rep = 0;
630 	tot_book_cnt = 0;
631 	resend_cnt = tot_out = 0;
632 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
633 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
634 			resend_cnt++;
635 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
636 			tot_out += chk->book_size;
637 			tot_book_cnt++;
638 		}
639 	}
640 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
641 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
642 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
643 		sctp_audit_indx++;
644 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
645 			sctp_audit_indx = 0;
646 		}
647 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
648 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
649 		rep = 1;
650 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
651 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
652 		sctp_audit_data[sctp_audit_indx][1] =
653 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
654 		sctp_audit_indx++;
655 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
656 			sctp_audit_indx = 0;
657 		}
658 	}
659 	if (tot_out != stcb->asoc.total_flight) {
660 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
661 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
662 		sctp_audit_indx++;
663 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
664 			sctp_audit_indx = 0;
665 		}
666 		rep = 1;
667 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
668 		    (int)stcb->asoc.total_flight);
669 		stcb->asoc.total_flight = tot_out;
670 	}
671 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
672 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
673 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
674 		sctp_audit_indx++;
675 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
676 			sctp_audit_indx = 0;
677 		}
678 		rep = 1;
679 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
680 
681 		stcb->asoc.total_flight_count = tot_book_cnt;
682 	}
683 	tot_out = 0;
684 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
685 		tot_out += lnet->flight_size;
686 	}
687 	if (tot_out != stcb->asoc.total_flight) {
688 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
689 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
690 		sctp_audit_indx++;
691 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
692 			sctp_audit_indx = 0;
693 		}
694 		rep = 1;
695 		SCTP_PRINTF("real flight:%d net total was %d\n",
696 		    stcb->asoc.total_flight, tot_out);
697 		/* now corrective action */
698 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
699 
700 			tot_out = 0;
701 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
702 				if ((chk->whoTo == lnet) &&
703 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
704 					tot_out += chk->book_size;
705 				}
706 			}
707 			if (lnet->flight_size != tot_out) {
708 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
709 				    (void *)lnet, lnet->flight_size,
710 				    tot_out);
711 				lnet->flight_size = tot_out;
712 			}
713 		}
714 	}
715 	if (rep) {
716 		sctp_print_audit_report();
717 	}
718 }
719 
720 void
721 sctp_audit_log(uint8_t ev, uint8_t fd)
722 {
723 
724 	sctp_audit_data[sctp_audit_indx][0] = ev;
725 	sctp_audit_data[sctp_audit_indx][1] = fd;
726 	sctp_audit_indx++;
727 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
728 		sctp_audit_indx = 0;
729 	}
730 }
731 
732 #endif
733 
734 /*
735  * sctp_stop_timers_for_shutdown() should be called
736  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
737  * state to make sure that all timers are stopped.
738  */
739 void
740 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
741 {
742 	struct sctp_association *asoc;
743 	struct sctp_nets *net;
744 
745 	asoc = &stcb->asoc;
746 
747 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
748 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
749 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
750 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
751 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
752 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
753 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
754 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
755 	}
756 }
757 
758 /*
759  * A list of sizes based on typical MTUs, used only if the next-hop
760  * MTU is not returned.
761  */
762 static uint32_t sctp_mtu_sizes[] = {
763 	68,
764 	296,
765 	508,
766 	512,
767 	544,
768 	576,
769 	1006,
770 	1492,
771 	1500,
772 	1536,
773 	2002,
774 	2048,
775 	4352,
776 	4464,
777 	8166,
778 	17914,
779 	32000,
780 	65535
781 };
782 
783 /*
784  * Return the largest MTU smaller than val. If there is no
785  * entry, just return val.
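 * For example, with the table above sctp_get_prev_mtu(1400) returns
 * 1006, while sctp_get_prev_mtu(50) returns 50.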
786  */
787 uint32_t
788 sctp_get_prev_mtu(uint32_t val)
789 {
790 	uint32_t i;
791 
792 	if (val <= sctp_mtu_sizes[0]) {
793 		return (val);
794 	}
795 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
796 		if (val <= sctp_mtu_sizes[i]) {
797 			break;
798 		}
799 	}
800 	return (sctp_mtu_sizes[i - 1]);
801 }
802 
803 /*
804  * Return the smallest MTU larger than val. If there is no
805  * entry, just return val.
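 * For example, sctp_get_next_mtu(1400) returns 1492, while values of
 * 65535 and above are returned unchanged.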
806  */
807 uint32_t
808 sctp_get_next_mtu(uint32_t val)
809 {
810 	/* select another MTU that is just bigger than this one */
811 	uint32_t i;
812 
813 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
814 		if (val < sctp_mtu_sizes[i]) {
815 			return (sctp_mtu_sizes[i]);
816 		}
817 	}
818 	return (val);
819 }
820 
821 void
822 sctp_fill_random_store(struct sctp_pcb *m)
823 {
824 	/*
825 	 * Here we use MD5/SHA-1 to hash our stored random numbers together
826 	 * with our counter. The result becomes the new random store and we
827 	 * set up to hand these values out. Note that we do no locking to
828 	 * protect this; that is OK, since competing callers only add more
829 	 * churn to the random store, which is what we want. Two callers may
830 	 * end up handing out the same random value, but that is OK too,
831 	 * since that is random as well :->
832 	 */
833 	m->store_at = 0;
834 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
835 	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
836 	    sizeof(m->random_counter), (uint8_t *)m->random_store);
837 	m->random_counter++;
838 }
839 
840 uint32_t
841 sctp_select_initial_TSN(struct sctp_pcb *inp)
842 {
843 	/*
844 	 * A true implementation should use a random selection process to
845 	 * get the initial sequence number (TSN), using RFC 1750 as a
846 	 * guideline.
847 	 */
848 	uint32_t x, *xp;
849 	uint8_t *p;
850 	int store_at, new_store;
851 
852 	if (inp->initial_sequence_debug != 0) {
853 		uint32_t ret;
854 
855 		ret = inp->initial_sequence_debug;
856 		inp->initial_sequence_debug++;
857 		return (ret);
858 	}
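	/*
	 * Claim the next four bytes of the random store with a CAS on
	 * store_at; whenever the offset wraps to zero, refill the store.
	 */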
859 retry:
860 	store_at = inp->store_at;
861 	new_store = store_at + sizeof(uint32_t);
862 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
863 		new_store = 0;
864 	}
865 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
866 		goto retry;
867 	}
868 	if (new_store == 0) {
869 		/* Refill the random store */
870 		sctp_fill_random_store(inp);
871 	}
872 	p = &inp->random_store[store_at];
873 	xp = (uint32_t *)p;
874 	x = *xp;
875 	return (x);
876 }
877 
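/*
 * Pick a non-zero verification tag. When 'check' is set, keep drawing
 * candidates until sctp_is_vtag_good() accepts the tag for this
 * lport/rport pair at the current time.
 */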
878 uint32_t
879 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
880 {
881 	uint32_t x;
882 	struct timeval now;
883 
884 	if (check) {
885 		(void)SCTP_GETTIME_TIMEVAL(&now);
886 	}
887 	for (;;) {
888 		x = sctp_select_initial_TSN(&inp->sctp_ep);
889 		if (x == 0) {
890 			/* we never use 0 */
891 			continue;
892 		}
893 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
894 			break;
895 		}
896 	}
897 	return (x);
898 }
899 
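/*
 * Map the kernel association state bits onto the SCTP_* states that are
 * reported to user space (SCTP_CLOSED, SCTP_ESTABLISHED, ...).
 */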
900 int32_t
901 sctp_map_assoc_state(int kernel_state)
902 {
903 	int32_t user_state;
904 
905 	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
906 		user_state = SCTP_CLOSED;
907 	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
908 		user_state = SCTP_SHUTDOWN_PENDING;
909 	} else {
910 		switch (kernel_state & SCTP_STATE_MASK) {
911 		case SCTP_STATE_EMPTY:
912 			user_state = SCTP_CLOSED;
913 			break;
914 		case SCTP_STATE_INUSE:
915 			user_state = SCTP_CLOSED;
916 			break;
917 		case SCTP_STATE_COOKIE_WAIT:
918 			user_state = SCTP_COOKIE_WAIT;
919 			break;
920 		case SCTP_STATE_COOKIE_ECHOED:
921 			user_state = SCTP_COOKIE_ECHOED;
922 			break;
923 		case SCTP_STATE_OPEN:
924 			user_state = SCTP_ESTABLISHED;
925 			break;
926 		case SCTP_STATE_SHUTDOWN_SENT:
927 			user_state = SCTP_SHUTDOWN_SENT;
928 			break;
929 		case SCTP_STATE_SHUTDOWN_RECEIVED:
930 			user_state = SCTP_SHUTDOWN_RECEIVED;
931 			break;
932 		case SCTP_STATE_SHUTDOWN_ACK_SENT:
933 			user_state = SCTP_SHUTDOWN_ACK_SENT;
934 			break;
935 		default:
936 			user_state = SCTP_CLOSED;
937 			break;
938 		}
939 	}
940 	return (user_state);
941 }
942 
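/*
 * Initialize a newly allocated association with the endpoint's defaults
 * and allocate its stream and mapping arrays. Returns 0 on success or
 * ENOMEM if an allocation fails.
 */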
943 int
944 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
945     uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
946 {
947 	struct sctp_association *asoc;
948 
949 	/*
950 	 * Anything set to zero is taken care of by the allocation routine's
951 	 * bzero
952 	 */
953 
954 	/*
955 	 * Up front, select what scoping to apply on addresses I tell my
956 	 * peer. Not sure what to do with these right now; we will need to
957 	 * come up with a way to set them. We may need to pass them through
958 	 * from the caller in the sctp_aloc_assoc() function.
959 	 */
960 	int i;
961 #if defined(SCTP_DETAILED_STR_STATS)
962 	int j;
963 #endif
964 
965 	asoc = &stcb->asoc;
966 	/* init all variables to a known value. */
967 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
968 	asoc->max_burst = inp->sctp_ep.max_burst;
969 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
970 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
971 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
972 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
973 	asoc->ecn_supported = inp->ecn_supported;
974 	asoc->prsctp_supported = inp->prsctp_supported;
975 	asoc->idata_supported = inp->idata_supported;
976 	asoc->auth_supported = inp->auth_supported;
977 	asoc->asconf_supported = inp->asconf_supported;
978 	asoc->reconfig_supported = inp->reconfig_supported;
979 	asoc->nrsack_supported = inp->nrsack_supported;
980 	asoc->pktdrop_supported = inp->pktdrop_supported;
982 	asoc->sctp_cmt_pf = (uint8_t)0;
983 	asoc->sctp_frag_point = inp->sctp_frag_point;
984 	asoc->sctp_features = inp->sctp_features;
985 	asoc->default_dscp = inp->sctp_ep.default_dscp;
986 	asoc->max_cwnd = inp->max_cwnd;
987 #ifdef INET6
988 	if (inp->sctp_ep.default_flowlabel) {
989 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
990 	} else {
991 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
992 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
993 			asoc->default_flowlabel &= 0x000fffff;
994 			asoc->default_flowlabel |= 0x80000000;
995 		} else {
996 			asoc->default_flowlabel = 0;
997 		}
998 	}
999 #endif
1000 	asoc->sb_send_resv = 0;
1001 	if (override_tag) {
1002 		asoc->my_vtag = override_tag;
1003 	} else {
1004 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1005 	}
1006 	/* Get the nonce tags */
1007 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1008 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1009 	asoc->vrf_id = vrf_id;
1010 
1011 #ifdef SCTP_ASOCLOG_OF_TSNS
1012 	asoc->tsn_in_at = 0;
1013 	asoc->tsn_out_at = 0;
1014 	asoc->tsn_in_wrapped = 0;
1015 	asoc->tsn_out_wrapped = 0;
1016 	asoc->cumack_log_at = 0;
1017 	asoc->cumack_log_atsnt = 0;
1018 #endif
1019 #ifdef SCTP_FS_SPEC_LOG
1020 	asoc->fs_index = 0;
1021 #endif
1022 	asoc->refcnt = 0;
1023 	asoc->assoc_up_sent = 0;
1024 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1025 	    sctp_select_initial_TSN(&inp->sctp_ep);
1026 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1027 	/* we are optimistic here */
1028 	asoc->peer_supports_nat = 0;
1029 	asoc->sent_queue_retran_cnt = 0;
1030 
1031 	/* for CMT */
1032 	asoc->last_net_cmt_send_started = NULL;
1033 
1034 	/* This will need to be adjusted */
1035 	asoc->last_acked_seq = asoc->init_seq_number - 1;
1036 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1037 	asoc->asconf_seq_in = asoc->last_acked_seq;
1038 
1039 	/* here we are different, we hold the next one we expect */
1040 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1041 
1042 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1043 	asoc->initial_rto = inp->sctp_ep.initial_rto;
1044 
1045 	asoc->max_init_times = inp->sctp_ep.max_init_times;
1046 	asoc->max_send_times = inp->sctp_ep.max_send_times;
1047 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1048 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1049 	asoc->free_chunk_cnt = 0;
1050 
1051 	asoc->iam_blocking = 0;
1052 	asoc->context = inp->sctp_context;
1053 	asoc->local_strreset_support = inp->local_strreset_support;
1054 	asoc->def_send = inp->def_send;
1055 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1056 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1057 	asoc->pr_sctp_cnt = 0;
1058 	asoc->total_output_queue_size = 0;
1059 
1060 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1061 		asoc->scope.ipv6_addr_legal = 1;
1062 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1063 			asoc->scope.ipv4_addr_legal = 1;
1064 		} else {
1065 			asoc->scope.ipv4_addr_legal = 0;
1066 		}
1067 	} else {
1068 		asoc->scope.ipv6_addr_legal = 0;
1069 		asoc->scope.ipv4_addr_legal = 1;
1070 	}
1071 
1072 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1073 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1074 
1075 	asoc->smallest_mtu = inp->sctp_frag_point;
1076 	asoc->minrto = inp->sctp_ep.sctp_minrto;
1077 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1078 
1079 	asoc->stream_locked_on = 0;
1080 	asoc->ecn_echo_cnt_onq = 0;
1081 	asoc->stream_locked = 0;
1082 
1083 	asoc->send_sack = 1;
1084 
1085 	LIST_INIT(&asoc->sctp_restricted_addrs);
1086 
1087 	TAILQ_INIT(&asoc->nets);
1088 	TAILQ_INIT(&asoc->pending_reply_queue);
1089 	TAILQ_INIT(&asoc->asconf_ack_sent);
1090 	/* Setup to fill the hb random cache at first HB */
1091 	asoc->hb_random_idx = 4;
1092 
1093 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1094 
1095 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1096 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1097 
1098 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1099 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1100 
1101 	/*
1102 	 * Now the stream parameters; here we allocate space for all streams
1103 	 * that we request by default.
1104 	 */
1105 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1106 	    o_strms;
1107 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1108 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1109 	    SCTP_M_STRMO);
1110 	if (asoc->strmout == NULL) {
1111 		/* big trouble no memory */
1112 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1113 		return (ENOMEM);
1114 	}
1115 	for (i = 0; i < asoc->streamoutcnt; i++) {
1116 		/*
1117 		 * The inbound side must be set to 0xffff. Also NOTE: when we
1118 		 * get the INIT-ACK back (for the INIT sender) we MUST reduce
1119 		 * the count (streamoutcnt), but first check whether we sent on
1120 		 * any of the upper streams that were dropped (if some were).
1121 		 * Anything sent on a dropped stream must be reported to the
1122 		 * upper layer as failed to send.
1123 		 */
1124 		asoc->strmout[i].next_mid_ordered = 0;
1125 		asoc->strmout[i].next_mid_unordered = 0;
1126 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1127 		asoc->strmout[i].chunks_on_queues = 0;
1128 #if defined(SCTP_DETAILED_STR_STATS)
1129 		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1130 			asoc->strmout[i].abandoned_sent[j] = 0;
1131 			asoc->strmout[i].abandoned_unsent[j] = 0;
1132 		}
1133 #else
1134 		asoc->strmout[i].abandoned_sent[0] = 0;
1135 		asoc->strmout[i].abandoned_unsent[0] = 0;
1136 #endif
1137 		asoc->strmout[i].sid = i;
1138 		asoc->strmout[i].last_msg_incomplete = 0;
1139 		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1140 		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
1141 	}
1142 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1143 
1144 	/* Now the mapping array */
1145 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1146 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1147 	    SCTP_M_MAP);
1148 	if (asoc->mapping_array == NULL) {
1149 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1150 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1151 		return (ENOMEM);
1152 	}
1153 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1154 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1155 	    SCTP_M_MAP);
1156 	if (asoc->nr_mapping_array == NULL) {
1157 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1158 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1159 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1160 		return (ENOMEM);
1161 	}
1162 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1163 
1164 	/* Now the init of the other outqueues */
1165 	TAILQ_INIT(&asoc->free_chunks);
1166 	TAILQ_INIT(&asoc->control_send_queue);
1167 	TAILQ_INIT(&asoc->asconf_send_queue);
1168 	TAILQ_INIT(&asoc->send_queue);
1169 	TAILQ_INIT(&asoc->sent_queue);
1170 	TAILQ_INIT(&asoc->resetHead);
1171 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1172 	TAILQ_INIT(&asoc->asconf_queue);
1173 	/* authentication fields */
1174 	asoc->authinfo.random = NULL;
1175 	asoc->authinfo.active_keyid = 0;
1176 	asoc->authinfo.assoc_key = NULL;
1177 	asoc->authinfo.assoc_keyid = 0;
1178 	asoc->authinfo.recv_key = NULL;
1179 	asoc->authinfo.recv_keyid = 0;
1180 	LIST_INIT(&asoc->shared_keys);
1181 	asoc->marked_retrans = 0;
1182 	asoc->port = inp->sctp_ep.port;
1183 	asoc->timoinit = 0;
1184 	asoc->timodata = 0;
1185 	asoc->timosack = 0;
1186 	asoc->timoshutdown = 0;
1187 	asoc->timoheartbeat = 0;
1188 	asoc->timocookie = 0;
1189 	asoc->timoshutdownack = 0;
1190 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1191 	asoc->discontinuity_time = asoc->start_time;
1192 	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1193 		asoc->abandoned_unsent[i] = 0;
1194 		asoc->abandoned_sent[i] = 0;
1195 	}
1196 	/*
1197 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1198 	 * freed later when the association is freed.
1199 	 */
1200 	return (0);
1201 }
1202 
1203 void
1204 sctp_print_mapping_array(struct sctp_association *asoc)
1205 {
1206 	unsigned int i, limit;
1207 
1208 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1209 	    asoc->mapping_array_size,
1210 	    asoc->mapping_array_base_tsn,
1211 	    asoc->cumulative_tsn,
1212 	    asoc->highest_tsn_inside_map,
1213 	    asoc->highest_tsn_inside_nr_map);
1214 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1215 		if (asoc->mapping_array[limit - 1] != 0) {
1216 			break;
1217 		}
1218 	}
1219 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1220 	for (i = 0; i < limit; i++) {
1221 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1222 	}
1223 	if (limit % 16)
1224 		SCTP_PRINTF("\n");
1225 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1226 		if (asoc->nr_mapping_array[limit - 1]) {
1227 			break;
1228 		}
1229 	}
1230 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1231 	for (i = 0; i < limit; i++) {
1232 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1233 	}
1234 	if (limit % 16)
1235 		SCTP_PRINTF("\n");
1236 }
1237 
1238 int
1239 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1240 {
1241 	/* mapping array needs to grow */
1242 	uint8_t *new_array1, *new_array2;
1243 	uint32_t new_size;
1244 
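	/*
	 * Grow both arrays by enough bytes to cover 'needed' additional
	 * bits, plus SCTP_MAPPING_ARRAY_INCR bytes of slack.
	 */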
1245 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1246 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1247 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1248 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1249 		/* can't get more, forget it */
1250 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1251 		if (new_array1) {
1252 			SCTP_FREE(new_array1, SCTP_M_MAP);
1253 		}
1254 		if (new_array2) {
1255 			SCTP_FREE(new_array2, SCTP_M_MAP);
1256 		}
1257 		return (-1);
1258 	}
1259 	memset(new_array1, 0, new_size);
1260 	memset(new_array2, 0, new_size);
1261 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1262 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1263 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1264 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1265 	asoc->mapping_array = new_array1;
1266 	asoc->nr_mapping_array = new_array2;
1267 	asoc->mapping_array_size = new_size;
1268 	return (0);
1269 }
1270 
1271 
1272 static void
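/*
 * Walk the endpoints and associations selected by this iterator, invoking
 * its callbacks along the way, and free the iterator when it completes.
 */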
1273 sctp_iterator_work(struct sctp_iterator *it)
1274 {
1275 	int iteration_count = 0;
1276 	int inp_skip = 0;
1277 	int first_in = 1;
1278 	struct sctp_inpcb *tinp;
1279 
1280 	SCTP_INP_INFO_RLOCK();
1281 	SCTP_ITERATOR_LOCK();
1282 	sctp_it_ctl.cur_it = it;
1283 	if (it->inp) {
1284 		SCTP_INP_RLOCK(it->inp);
1285 		SCTP_INP_DECR_REF(it->inp);
1286 	}
1287 	if (it->inp == NULL) {
1288 		/* iterator is complete */
1289 done_with_iterator:
1290 		sctp_it_ctl.cur_it = NULL;
1291 		SCTP_ITERATOR_UNLOCK();
1292 		SCTP_INP_INFO_RUNLOCK();
1293 		if (it->function_atend != NULL) {
1294 			(*it->function_atend) (it->pointer, it->val);
1295 		}
1296 		SCTP_FREE(it, SCTP_M_ITER);
1297 		return;
1298 	}
1299 select_a_new_ep:
1300 	if (first_in) {
1301 		first_in = 0;
1302 	} else {
1303 		SCTP_INP_RLOCK(it->inp);
1304 	}
1305 	while (((it->pcb_flags) &&
1306 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1307 	    ((it->pcb_features) &&
1308 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1309 		/* endpoint flags or features don't match, so keep looking */
1310 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1311 			SCTP_INP_RUNLOCK(it->inp);
1312 			goto done_with_iterator;
1313 		}
1314 		tinp = it->inp;
1315 		it->inp = LIST_NEXT(it->inp, sctp_list);
1316 		SCTP_INP_RUNLOCK(tinp);
1317 		if (it->inp == NULL) {
1318 			goto done_with_iterator;
1319 		}
1320 		SCTP_INP_RLOCK(it->inp);
1321 	}
1322 	/* now go through each assoc which is in the desired state */
1323 	if (it->done_current_ep == 0) {
1324 		if (it->function_inp != NULL)
1325 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1326 		it->done_current_ep = 1;
1327 	}
1328 	if (it->stcb == NULL) {
1329 		/* start with the first association on this endpoint */
1330 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1331 	}
1332 	if ((inp_skip) || it->stcb == NULL) {
1333 		if (it->function_inp_end != NULL) {
1334 			inp_skip = (*it->function_inp_end) (it->inp,
1335 			    it->pointer,
1336 			    it->val);
1337 		}
1338 		SCTP_INP_RUNLOCK(it->inp);
1339 		goto no_stcb;
1340 	}
1341 	while (it->stcb) {
1342 		SCTP_TCB_LOCK(it->stcb);
1343 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1344 			/* not in the right state... keep looking */
1345 			SCTP_TCB_UNLOCK(it->stcb);
1346 			goto next_assoc;
1347 		}
1348 		/* see if we have hit the iterator loop limit */
1349 		iteration_count++;
1350 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1351 			/* Pause to let others grab the lock */
1352 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1353 			SCTP_TCB_UNLOCK(it->stcb);
1354 			SCTP_INP_INCR_REF(it->inp);
1355 			SCTP_INP_RUNLOCK(it->inp);
1356 			SCTP_ITERATOR_UNLOCK();
1357 			SCTP_INP_INFO_RUNLOCK();
1358 			SCTP_INP_INFO_RLOCK();
1359 			SCTP_ITERATOR_LOCK();
1360 			if (sctp_it_ctl.iterator_flags) {
1361 				/* We won't be staying here */
1362 				SCTP_INP_DECR_REF(it->inp);
1363 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1364 				if (sctp_it_ctl.iterator_flags &
1365 				    SCTP_ITERATOR_STOP_CUR_IT) {
1366 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1367 					goto done_with_iterator;
1368 				}
1369 				if (sctp_it_ctl.iterator_flags &
1370 				    SCTP_ITERATOR_STOP_CUR_INP) {
1371 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1372 					goto no_stcb;
1373 				}
1374 				/* We should not reach here: unknown iterator control flag. */
1375 				SCTP_PRINTF("Unknown it ctl flag %x\n",
1376 				    sctp_it_ctl.iterator_flags);
1377 				sctp_it_ctl.iterator_flags = 0;
1378 			}
1379 			SCTP_INP_RLOCK(it->inp);
1380 			SCTP_INP_DECR_REF(it->inp);
1381 			SCTP_TCB_LOCK(it->stcb);
1382 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1383 			iteration_count = 0;
1384 		}
1385 		/* run function on this one */
1386 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1387 
1388 		/*
1389 		 * We lie here: it really needs its own type, but first I must
1390 		 * verify that this won't affect things :-0
1391 		 */
1392 		if (it->no_chunk_output == 0)
1393 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1394 
1395 		SCTP_TCB_UNLOCK(it->stcb);
1396 next_assoc:
1397 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1398 		if (it->stcb == NULL) {
1399 			/* Run last function */
1400 			if (it->function_inp_end != NULL) {
1401 				inp_skip = (*it->function_inp_end) (it->inp,
1402 				    it->pointer,
1403 				    it->val);
1404 			}
1405 		}
1406 	}
1407 	SCTP_INP_RUNLOCK(it->inp);
1408 no_stcb:
1409 	/* done with all assocs on this endpoint, move on to next endpoint */
1410 	it->done_current_ep = 0;
1411 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1412 		it->inp = NULL;
1413 	} else {
1414 		it->inp = LIST_NEXT(it->inp, sctp_list);
1415 	}
1416 	if (it->inp == NULL) {
1417 		goto done_with_iterator;
1418 	}
1419 	goto select_a_new_ep;
1420 }
1421 
1422 void
1423 sctp_iterator_worker(void)
1424 {
1425 	struct sctp_iterator *it, *nit;
1426 
1427 	/* This function is called with the WQ lock in place */
1428 
1429 	sctp_it_ctl.iterator_running = 1;
1430 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1431 		/* now let's work on this one */
1432 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1433 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1434 		CURVNET_SET(it->vn);
1435 		sctp_iterator_work(it);
1436 		CURVNET_RESTORE();
1437 		SCTP_IPI_ITERATOR_WQ_LOCK();
1438 		/* sa_ignore FREED_MEMORY */
1439 	}
1440 	sctp_it_ctl.iterator_running = 0;
1441 	return;
1442 }
1443 
1444 
1445 static void
1446 sctp_handle_addr_wq(void)
1447 {
1448 	/* deal with the ADDR wq from the rtsock calls */
1449 	struct sctp_laddr *wi, *nwi;
1450 	struct sctp_asconf_iterator *asc;
1451 
1452 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1453 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1454 	if (asc == NULL) {
1455 		/* Try later, no memory */
1456 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1457 		    (struct sctp_inpcb *)NULL,
1458 		    (struct sctp_tcb *)NULL,
1459 		    (struct sctp_nets *)NULL);
1460 		return;
1461 	}
1462 	LIST_INIT(&asc->list_of_work);
1463 	asc->cnt = 0;
1464 
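	/* Move every queued address work item onto our private list. */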
1465 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1466 		LIST_REMOVE(wi, sctp_nxt_addr);
1467 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1468 		asc->cnt++;
1469 	}
1470 
1471 	if (asc->cnt == 0) {
1472 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1473 	} else {
1474 		int ret;
1475 
1476 		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
1477 		    sctp_asconf_iterator_stcb,
1478 		    NULL,	/* No ep end for boundall */
1479 		    SCTP_PCB_FLAGS_BOUNDALL,
1480 		    SCTP_PCB_ANY_FEATURES,
1481 		    SCTP_ASOC_ANY_STATE,
1482 		    (void *)asc, 0,
1483 		    sctp_asconf_iterator_end, NULL, 0);
1484 		if (ret) {
1485 			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
1486 			/*
1487 			 * Free the work items if we are shutting down;
1488 			 * otherwise put them back on the addr_wq.
1489 			 */
1490 			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
1491 				sctp_asconf_iterator_end(asc, 0);
1492 			} else {
1493 				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
1494 					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
1495 				}
1496 				SCTP_FREE(asc, SCTP_M_ASC_IT);
1497 			}
1498 		}
1499 	}
1500 }
1501 
1502 void
1503 sctp_timeout_handler(void *t)
1504 {
1505 	struct sctp_inpcb *inp;
1506 	struct sctp_tcb *stcb;
1507 	struct sctp_nets *net;
1508 	struct sctp_timer *tmr;
1509 	struct mbuf *op_err;
1510 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1511 	struct socket *so;
1512 #endif
1513 	int did_output;
1514 	int type;
1515 
1516 	tmr = (struct sctp_timer *)t;
1517 	inp = (struct sctp_inpcb *)tmr->ep;
1518 	stcb = (struct sctp_tcb *)tmr->tcb;
1519 	net = (struct sctp_nets *)tmr->net;
1520 	CURVNET_SET((struct vnet *)tmr->vnet);
1521 	did_output = 1;
1522 
1523 #ifdef SCTP_AUDITING_ENABLED
1524 	sctp_audit_log(0xF0, (uint8_t)tmr->type);
1525 	sctp_auditing(3, inp, stcb, net);
1526 #endif
1527 
1528 	/* sanity checks... */
1529 	if (tmr->self != (void *)tmr) {
1530 		/*
1531 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1532 		 * (void *)tmr);
1533 		 */
1534 		CURVNET_RESTORE();
1535 		return;
1536 	}
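	/*
	 * The 0xa00x values stored in stopped_from below serve as debugging
	 * breadcrumbs marking how far the sanity checks got before returning.
	 */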
1537 	tmr->stopped_from = 0xa001;
1538 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1539 		/*
1540 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1541 		 * tmr->type);
1542 		 */
1543 		CURVNET_RESTORE();
1544 		return;
1545 	}
1546 	tmr->stopped_from = 0xa002;
1547 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1548 		CURVNET_RESTORE();
1549 		return;
1550 	}
1551 	/* if this is an iterator timeout, get the struct and clear inp */
1552 	tmr->stopped_from = 0xa003;
1553 	if (inp) {
1554 		SCTP_INP_INCR_REF(inp);
1555 		if ((inp->sctp_socket == NULL) &&
1556 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1557 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1558 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1559 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1560 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1561 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1562 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1563 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1564 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))) {
1565 			SCTP_INP_DECR_REF(inp);
1566 			CURVNET_RESTORE();
1567 			return;
1568 		}
1569 	}
1570 	tmr->stopped_from = 0xa004;
1571 	if (stcb) {
1572 		atomic_add_int(&stcb->asoc.refcnt, 1);
1573 		if (stcb->asoc.state == 0) {
1574 			atomic_add_int(&stcb->asoc.refcnt, -1);
1575 			if (inp) {
1576 				SCTP_INP_DECR_REF(inp);
1577 			}
1578 			CURVNET_RESTORE();
1579 			return;
1580 		}
1581 	}
1582 	type = tmr->type;
1583 	tmr->stopped_from = 0xa005;
1584 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
1585 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1586 		if (inp) {
1587 			SCTP_INP_DECR_REF(inp);
1588 		}
1589 		if (stcb) {
1590 			atomic_add_int(&stcb->asoc.refcnt, -1);
1591 		}
1592 		CURVNET_RESTORE();
1593 		return;
1594 	}
1595 	tmr->stopped_from = 0xa006;
1596 
1597 	if (stcb) {
1598 		SCTP_TCB_LOCK(stcb);
1599 		atomic_add_int(&stcb->asoc.refcnt, -1);
1600 		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
1601 		    ((stcb->asoc.state == 0) ||
1602 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1603 			SCTP_TCB_UNLOCK(stcb);
1604 			if (inp) {
1605 				SCTP_INP_DECR_REF(inp);
1606 			}
1607 			CURVNET_RESTORE();
1608 			return;
1609 		}
1610 	} else if (inp != NULL) {
1611 		if (type != SCTP_TIMER_TYPE_INPKILL) {
1612 			SCTP_INP_WLOCK(inp);
1613 		}
1614 	} else {
1615 		SCTP_WQ_ADDR_LOCK();
1616 	}
1617 	/* record in stopped_from which timeout occurred */
1618 	tmr->stopped_from = type;
1619 
1620 	/* mark as being serviced now */
1621 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1622 		/*
1623 		 * Callout has been rescheduled.
1624 		 */
1625 		goto get_out;
1626 	}
1627 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1628 		/*
1629 		 * Not active, so no action.
1630 		 */
1631 		goto get_out;
1632 	}
1633 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1634 
1635 	/* call the handler for the appropriate timer type */
1636 	switch (type) {
1637 	case SCTP_TIMER_TYPE_ADDR_WQ:
1638 		sctp_handle_addr_wq();
1639 		break;
1640 	case SCTP_TIMER_TYPE_SEND:
1641 		if ((stcb == NULL) || (inp == NULL)) {
1642 			break;
1643 		}
1644 		SCTP_STAT_INCR(sctps_timodata);
1645 		stcb->asoc.timodata++;
1646 		stcb->asoc.num_send_timers_up--;
1647 		if (stcb->asoc.num_send_timers_up < 0) {
1648 			stcb->asoc.num_send_timers_up = 0;
1649 		}
1650 		SCTP_TCB_LOCK_ASSERT(stcb);
1651 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1652 			/* no need to unlock the tcb, it's gone */
1653 
1654 			goto out_decr;
1655 		}
1656 		SCTP_TCB_LOCK_ASSERT(stcb);
1657 #ifdef SCTP_AUDITING_ENABLED
1658 		sctp_auditing(4, inp, stcb, net);
1659 #endif
1660 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1661 		if ((stcb->asoc.num_send_timers_up == 0) &&
1662 		    (stcb->asoc.sent_queue_cnt > 0)) {
1663 			struct sctp_tmit_chunk *chk;
1664 
1665 			/*
1666 			 * Safeguard: if there are chunks on the sent queue
1667 			 * somewhere but no timers running, something is
1668 			 * wrong, so we start a timer on the first chunk of
1669 			 * the sent queue on whatever net it is sent to.
1670 			 */
1671 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1672 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1673 			    chk->whoTo);
1674 		}
1675 		break;
1676 	case SCTP_TIMER_TYPE_INIT:
1677 		if ((stcb == NULL) || (inp == NULL)) {
1678 			break;
1679 		}
1680 		SCTP_STAT_INCR(sctps_timoinit);
1681 		stcb->asoc.timoinit++;
1682 		if (sctp_t1init_timer(inp, stcb, net)) {
1683 			/* no need to unlock the tcb, it's gone */
1684 			goto out_decr;
1685 		}
1686 		/* We do output but not here */
1687 		did_output = 0;
1688 		break;
1689 	case SCTP_TIMER_TYPE_RECV:
1690 		if ((stcb == NULL) || (inp == NULL)) {
1691 			break;
1692 		}
1693 		SCTP_STAT_INCR(sctps_timosack);
1694 		stcb->asoc.timosack++;
1695 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1696 #ifdef SCTP_AUDITING_ENABLED
1697 		sctp_auditing(4, inp, stcb, net);
1698 #endif
1699 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1700 		break;
1701 	case SCTP_TIMER_TYPE_SHUTDOWN:
1702 		if ((stcb == NULL) || (inp == NULL)) {
1703 			break;
1704 		}
1705 		if (sctp_shutdown_timer(inp, stcb, net)) {
1706 			/* no need to unlock the tcb, it's gone */
1707 			goto out_decr;
1708 		}
1709 		SCTP_STAT_INCR(sctps_timoshutdown);
1710 		stcb->asoc.timoshutdown++;
1711 #ifdef SCTP_AUDITING_ENABLED
1712 		sctp_auditing(4, inp, stcb, net);
1713 #endif
1714 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1715 		break;
1716 	case SCTP_TIMER_TYPE_HEARTBEAT:
1717 		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
1718 			break;
1719 		}
1720 		SCTP_STAT_INCR(sctps_timoheartbeat);
1721 		stcb->asoc.timoheartbeat++;
1722 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1723 			/* no need to unlock the tcb, it's gone */
1724 			goto out_decr;
1725 		}
1726 #ifdef SCTP_AUDITING_ENABLED
1727 		sctp_auditing(4, inp, stcb, net);
1728 #endif
1729 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
1730 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
1731 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1732 		}
1733 		break;
1734 	case SCTP_TIMER_TYPE_COOKIE:
1735 		if ((stcb == NULL) || (inp == NULL)) {
1736 			break;
1737 		}
1738 		if (sctp_cookie_timer(inp, stcb, net)) {
1739 			/* no need to unlock the tcb, it's gone */
1740 			goto out_decr;
1741 		}
1742 		SCTP_STAT_INCR(sctps_timocookie);
1743 		stcb->asoc.timocookie++;
1744 #ifdef SCTP_AUDITING_ENABLED
1745 		sctp_auditing(4, inp, stcb, net);
1746 #endif
1747 		/*
1748 		 * We consider the T3 and cookie timers pretty much the same
1749 		 * with respect to the 'from' value passed to chunk_output.
1750 		 */
1751 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1752 		break;
1753 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1754 		{
1755 			struct timeval tv;
1756 			int i, secret;
1757 
1758 			if (inp == NULL) {
1759 				break;
1760 			}
1761 			SCTP_STAT_INCR(sctps_timosecret);
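			/*
			 * Rotate the endpoint's cookie secret: record when the
			 * change happened, keep the previous secret so cookies
			 * signed with it can still be validated, and fill the
			 * next slot with fresh random words.
			 */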
1762 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1763 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1764 			inp->sctp_ep.last_secret_number =
1765 			    inp->sctp_ep.current_secret_number;
1766 			inp->sctp_ep.current_secret_number++;
1767 			if (inp->sctp_ep.current_secret_number >=
1768 			    SCTP_HOW_MANY_SECRETS) {
1769 				inp->sctp_ep.current_secret_number = 0;
1770 			}
1771 			secret = (int)inp->sctp_ep.current_secret_number;
1772 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1773 				inp->sctp_ep.secret_key[secret][i] =
1774 				    sctp_select_initial_TSN(&inp->sctp_ep);
1775 			}
1776 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1777 		}
1778 		did_output = 0;
1779 		break;
1780 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1781 		if ((stcb == NULL) || (inp == NULL)) {
1782 			break;
1783 		}
1784 		SCTP_STAT_INCR(sctps_timopathmtu);
1785 		sctp_pathmtu_timer(inp, stcb, net);
1786 		did_output = 0;
1787 		break;
1788 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1789 		if ((stcb == NULL) || (inp == NULL)) {
1790 			break;
1791 		}
1792 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1793 			/* no need to unlock the tcb, it's gone */
1794 			goto out_decr;
1795 		}
1796 		SCTP_STAT_INCR(sctps_timoshutdownack);
1797 		stcb->asoc.timoshutdownack++;
1798 #ifdef SCTP_AUDITING_ENABLED
1799 		sctp_auditing(4, inp, stcb, net);
1800 #endif
1801 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1802 		break;
1803 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1804 		if ((stcb == NULL) || (inp == NULL)) {
1805 			break;
1806 		}
1807 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1808 		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
1809 		    "Shutdown guard timer expired");
1810 		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
1811 		/* no need to unlock the tcb, it's gone */
1812 		goto out_decr;
1813 
1814 	case SCTP_TIMER_TYPE_STRRESET:
1815 		if ((stcb == NULL) || (inp == NULL)) {
1816 			break;
1817 		}
1818 		if (sctp_strreset_timer(inp, stcb, net)) {
1819 			/* no need to unlock the tcb, it's gone */
1820 			goto out_decr;
1821 		}
1822 		SCTP_STAT_INCR(sctps_timostrmrst);
1823 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1824 		break;
1825 	case SCTP_TIMER_TYPE_ASCONF:
1826 		if ((stcb == NULL) || (inp == NULL)) {
1827 			break;
1828 		}
1829 		if (sctp_asconf_timer(inp, stcb, net)) {
1830 			/* no need to unlock the tcb, it's gone */
1831 			goto out_decr;
1832 		}
1833 		SCTP_STAT_INCR(sctps_timoasconf);
1834 #ifdef SCTP_AUDITING_ENABLED
1835 		sctp_auditing(4, inp, stcb, net);
1836 #endif
1837 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1838 		break;
1839 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1840 		if ((stcb == NULL) || (inp == NULL)) {
1841 			break;
1842 		}
1843 		sctp_delete_prim_timer(inp, stcb, net);
1844 		SCTP_STAT_INCR(sctps_timodelprim);
1845 		break;
1846 
1847 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1848 		if ((stcb == NULL) || (inp == NULL)) {
1849 			break;
1850 		}
1851 		SCTP_STAT_INCR(sctps_timoautoclose);
1852 		sctp_autoclose_timer(inp, stcb, net);
1853 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1854 		did_output = 0;
1855 		break;
1856 	case SCTP_TIMER_TYPE_ASOCKILL:
1857 		if ((stcb == NULL) || (inp == NULL)) {
1858 			break;
1859 		}
1860 		SCTP_STAT_INCR(sctps_timoassockill);
1861 		/* Can we free it yet? */
1862 		SCTP_INP_DECR_REF(inp);
1863 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
1864 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1865 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1866 		so = SCTP_INP_SO(inp);
1867 		atomic_add_int(&stcb->asoc.refcnt, 1);
1868 		SCTP_TCB_UNLOCK(stcb);
1869 		SCTP_SOCKET_LOCK(so, 1);
1870 		SCTP_TCB_LOCK(stcb);
1871 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1872 #endif
1873 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
1874 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1875 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1876 		SCTP_SOCKET_UNLOCK(so, 1);
1877 #endif
1878 		/*
1879 		 * sctp_free_assoc() always unlocks (or destroys) the tcb, so
1880 		 * prevent a duplicate unlock or an unlock of a freed mutex.
1881 		 */
1882 		stcb = NULL;
1883 		goto out_no_decr;
1884 	case SCTP_TIMER_TYPE_INPKILL:
1885 		SCTP_STAT_INCR(sctps_timoinpkill);
1886 		if (inp == NULL) {
1887 			break;
1888 		}
1889 		/*
1890 		 * special case, take away our increment since WE are the
1891 		 * killer
1892 		 */
1893 		SCTP_INP_DECR_REF(inp);
1894 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
1895 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1896 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1897 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1898 		inp = NULL;
1899 		goto out_no_decr;
1900 	default:
1901 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1902 		    type);
1903 		break;
1904 	}
1905 #ifdef SCTP_AUDITING_ENABLED
1906 	sctp_audit_log(0xF1, (uint8_t)type);
1907 	if (inp)
1908 		sctp_auditing(5, inp, stcb, net);
1909 #endif
1910 	if ((did_output) && stcb) {
1911 		/*
1912 		 * Now we need to clean up the control chunk chain if an
1913 		 * ECNE is on it. It must be marked as UNSENT again so the
1914 		 * next call will continue to send it until we get a CWR,
1915 		 * which removes it. It is, however, unlikely that we will
1916 		 * find an ECN echo on the chain.
1917 		 */
1918 		sctp_fix_ecn_echo(&stcb->asoc);
1919 	}
1920 get_out:
1921 	if (stcb) {
1922 		SCTP_TCB_UNLOCK(stcb);
1923 	} else if (inp != NULL) {
1924 		SCTP_INP_WUNLOCK(inp);
1925 	} else {
1926 		SCTP_WQ_ADDR_UNLOCK();
1927 	}
1928 
1929 out_decr:
1930 	if (inp) {
1931 		SCTP_INP_DECR_REF(inp);
1932 	}
1933 out_no_decr:
1934 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
1935 	CURVNET_RESTORE();
1936 }
1937 
1938 void
1939 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1940     struct sctp_nets *net)
1941 {
1942 	uint32_t to_ticks;
1943 	struct sctp_timer *tmr;
1944 
1945 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1946 		return;
1947 
1948 	tmr = NULL;
1949 	if (stcb) {
1950 		SCTP_TCB_LOCK_ASSERT(stcb);
1951 	}
1952 	switch (t_type) {
1953 	case SCTP_TIMER_TYPE_ADDR_WQ:
1954 		/* Only 1 tick away :-) */
1955 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1956 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1957 		break;
1958 	case SCTP_TIMER_TYPE_SEND:
1959 		/* Here we use the RTO timer */
1960 		{
1961 			int rto_val;
1962 
1963 			if ((stcb == NULL) || (net == NULL)) {
1964 				return;
1965 			}
1966 			tmr = &net->rxt_timer;
1967 			if (net->RTO == 0) {
1968 				rto_val = stcb->asoc.initial_rto;
1969 			} else {
1970 				rto_val = net->RTO;
1971 			}
1972 			to_ticks = MSEC_TO_TICKS(rto_val);
1973 		}
1974 		break;
1975 	case SCTP_TIMER_TYPE_INIT:
1976 		/*
1977 		 * Here we use the INIT timer default, usually about 1
1978 		 * minute.
1979 		 */
1980 		if ((stcb == NULL) || (net == NULL)) {
1981 			return;
1982 		}
1983 		tmr = &net->rxt_timer;
1984 		if (net->RTO == 0) {
1985 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1986 		} else {
1987 			to_ticks = MSEC_TO_TICKS(net->RTO);
1988 		}
1989 		break;
1990 	case SCTP_TIMER_TYPE_RECV:
1991 		/*
1992 		 * Here we use the delayed ACK timer value from the inp,
1993 		 * usually about 200 ms.
1994 		 */
1995 		if (stcb == NULL) {
1996 			return;
1997 		}
1998 		tmr = &stcb->asoc.dack_timer;
1999 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2000 		break;
2001 	case SCTP_TIMER_TYPE_SHUTDOWN:
2002 		/* Here we use the RTO of the destination. */
2003 		if ((stcb == NULL) || (net == NULL)) {
2004 			return;
2005 		}
2006 		if (net->RTO == 0) {
2007 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2008 		} else {
2009 			to_ticks = MSEC_TO_TICKS(net->RTO);
2010 		}
2011 		tmr = &net->rxt_timer;
2012 		break;
2013 	case SCTP_TIMER_TYPE_HEARTBEAT:
2014 		/*
2015 		 * The net is used here so that we can add in the RTO. Even
2016 		 * though we use a different timer, we also add the HB delay
2017 		 * PLUS a random jitter.
2018 		 */
2019 		if ((stcb == NULL) || (net == NULL)) {
2020 			return;
2021 		} else {
2022 			uint32_t rndval;
2023 			uint32_t jitter;
2024 
2025 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2026 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2027 				return;
2028 			}
2029 			if (net->RTO == 0) {
2030 				to_ticks = stcb->asoc.initial_rto;
2031 			} else {
2032 				to_ticks = net->RTO;
2033 			}
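			/*
			 * Add a random jitter of up to +/- 50% of the RTO, so
			 * the resulting delay falls roughly in the range
			 * [RTO/2, 3*RTO/2).
			 */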
2034 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2035 			jitter = rndval % to_ticks;
2036 			if (jitter >= (to_ticks >> 1)) {
2037 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2038 			} else {
2039 				to_ticks = to_ticks - jitter;
2040 			}
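			/*
			 * The configured heartbeat delay is only added for
			 * confirmed destinations that are not in the
			 * potentially-failed state; unconfirmed or PF
			 * addresses are probed at roughly RTO-based intervals
			 * instead.
			 */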
2041 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2042 			    !(net->dest_state & SCTP_ADDR_PF)) {
2043 				to_ticks += net->heart_beat_delay;
2044 			}
2045 			/*
2046 			 * Now we must convert to_ticks, which is currently in
2047 			 * ms, to ticks.
2048 			 */
2049 			to_ticks = MSEC_TO_TICKS(to_ticks);
2050 			tmr = &net->hb_timer;
2051 		}
2052 		break;
2053 	case SCTP_TIMER_TYPE_COOKIE:
2054 		/*
2055 		 * Here we can use the RTO of the destination, since one
2056 		 * RTT measurement is complete. If a retransmission happened,
2057 		 * then we will be using the initial RTO value.
2058 		 */
2059 		if ((stcb == NULL) || (net == NULL)) {
2060 			return;
2061 		}
2062 		if (net->RTO == 0) {
2063 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2064 		} else {
2065 			to_ticks = MSEC_TO_TICKS(net->RTO);
2066 		}
2067 		tmr = &net->rxt_timer;
2068 		break;
2069 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2070 		/*
2071 		 * Nothing needed but the endpoint here; the timeout is
2072 		 * usually about 60 minutes.
2073 		 */
2074 		tmr = &inp->sctp_ep.signature_change;
2075 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2076 		break;
2077 	case SCTP_TIMER_TYPE_ASOCKILL:
2078 		if (stcb == NULL) {
2079 			return;
2080 		}
2081 		tmr = &stcb->asoc.strreset_timer;
2082 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2083 		break;
2084 	case SCTP_TIMER_TYPE_INPKILL:
2085 		/*
2086 		 * The inp is set up to die. We re-use the signature_change
2087 		 * timer since that has stopped and we are in the GONE
2088 		 * state.
2089 		 */
2090 		tmr = &inp->sctp_ep.signature_change;
2091 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2092 		break;
2093 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2094 		/*
2095 		 * Here we use the PMTU timeout found in the EP, usually
2096 		 * about 10 minutes.
2097 		 */
2098 		if ((stcb == NULL) || (net == NULL)) {
2099 			return;
2100 		}
2101 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2102 			return;
2103 		}
2104 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2105 		tmr = &net->pmtu_timer;
2106 		break;
2107 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2108 		/* Here we use the RTO of the destination */
2109 		if ((stcb == NULL) || (net == NULL)) {
2110 			return;
2111 		}
2112 		if (net->RTO == 0) {
2113 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2114 		} else {
2115 			to_ticks = MSEC_TO_TICKS(net->RTO);
2116 		}
2117 		tmr = &net->rxt_timer;
2118 		break;
2119 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2120 		/*
2121 		 * Here we use the endpoint's shutdown guard timer, usually
2122 		 * about 3 minutes.
2123 		 */
2124 		if (stcb == NULL) {
2125 			return;
2126 		}
2127 		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2128 			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2129 		} else {
2130 			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2131 		}
2132 		tmr = &stcb->asoc.shut_guard_timer;
2133 		break;
2134 	case SCTP_TIMER_TYPE_STRRESET:
2135 		/*
2136 		 * Here the timer comes from the stcb but its value is from
2137 		 * the net's RTO.
2138 		 */
2139 		if ((stcb == NULL) || (net == NULL)) {
2140 			return;
2141 		}
2142 		if (net->RTO == 0) {
2143 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2144 		} else {
2145 			to_ticks = MSEC_TO_TICKS(net->RTO);
2146 		}
2147 		tmr = &stcb->asoc.strreset_timer;
2148 		break;
2149 	case SCTP_TIMER_TYPE_ASCONF:
2150 		/*
2151 		 * Here the timer comes from the stcb but its value is from
2152 		 * the net's RTO.
2153 		 */
2154 		if ((stcb == NULL) || (net == NULL)) {
2155 			return;
2156 		}
2157 		if (net->RTO == 0) {
2158 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2159 		} else {
2160 			to_ticks = MSEC_TO_TICKS(net->RTO);
2161 		}
2162 		tmr = &stcb->asoc.asconf_timer;
2163 		break;
2164 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2165 		if ((stcb == NULL) || (net != NULL)) {
2166 			return;
2167 		}
2168 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2169 		tmr = &stcb->asoc.delete_prim_timer;
2170 		break;
2171 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2172 		if (stcb == NULL) {
2173 			return;
2174 		}
2175 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2176 			/*
2177 			 * Really an error since stcb is NOT set to
2178 			 * autoclose
2179 			 */
2180 			return;
2181 		}
2182 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2183 		tmr = &stcb->asoc.autoclose_timer;
2184 		break;
2185 	default:
2186 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2187 		    __func__, t_type);
2188 		return;
2189 		break;
2190 	}
2191 	if ((to_ticks <= 0) || (tmr == NULL)) {
2192 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2193 		    __func__, t_type, to_ticks, (void *)tmr);
2194 		return;
2195 	}
2196 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2197 		/*
2198 		 * We do NOT allow a timer to be started if it is already
2199 		 * running; if it is, we leave the current one up unchanged.
2200 		 */
2201 		return;
2202 	}
2203 	/* At this point we can proceed */
2204 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2205 		stcb->asoc.num_send_timers_up++;
2206 	}
2207 	tmr->stopped_from = 0;
2208 	tmr->type = t_type;
2209 	tmr->ep = (void *)inp;
2210 	tmr->tcb = (void *)stcb;
2211 	tmr->net = (void *)net;
2212 	tmr->self = (void *)tmr;
2213 	tmr->vnet = (void *)curvnet;
2214 	tmr->ticks = sctp_get_tick_count();
2215 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2216 	return;
2217 }
2218 
2219 void
2220 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2221     struct sctp_nets *net, uint32_t from)
2222 {
2223 	struct sctp_timer *tmr;
2224 
2225 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2226 	    (inp == NULL))
2227 		return;
2228 
2229 	tmr = NULL;
2230 	if (stcb) {
2231 		SCTP_TCB_LOCK_ASSERT(stcb);
2232 	}
2233 	switch (t_type) {
2234 	case SCTP_TIMER_TYPE_ADDR_WQ:
2235 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2236 		break;
2237 	case SCTP_TIMER_TYPE_SEND:
2238 		if ((stcb == NULL) || (net == NULL)) {
2239 			return;
2240 		}
2241 		tmr = &net->rxt_timer;
2242 		break;
2243 	case SCTP_TIMER_TYPE_INIT:
2244 		if ((stcb == NULL) || (net == NULL)) {
2245 			return;
2246 		}
2247 		tmr = &net->rxt_timer;
2248 		break;
2249 	case SCTP_TIMER_TYPE_RECV:
2250 		if (stcb == NULL) {
2251 			return;
2252 		}
2253 		tmr = &stcb->asoc.dack_timer;
2254 		break;
2255 	case SCTP_TIMER_TYPE_SHUTDOWN:
2256 		if ((stcb == NULL) || (net == NULL)) {
2257 			return;
2258 		}
2259 		tmr = &net->rxt_timer;
2260 		break;
2261 	case SCTP_TIMER_TYPE_HEARTBEAT:
2262 		if ((stcb == NULL) || (net == NULL)) {
2263 			return;
2264 		}
2265 		tmr = &net->hb_timer;
2266 		break;
2267 	case SCTP_TIMER_TYPE_COOKIE:
2268 		if ((stcb == NULL) || (net == NULL)) {
2269 			return;
2270 		}
2271 		tmr = &net->rxt_timer;
2272 		break;
2273 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2274 		/* nothing needed but the endpoint here */
2275 		tmr = &inp->sctp_ep.signature_change;
2276 		/*
2277 		 * We re-use the newcookie timer for the INP kill timer. We
2278 		 * must ensure that we do not stop it by accident.
2279 		 */
2280 		break;
2281 	case SCTP_TIMER_TYPE_ASOCKILL:
2282 		/*
2283 		 * Stop the asoc kill timer.
2284 		 */
2285 		if (stcb == NULL) {
2286 			return;
2287 		}
2288 		tmr = &stcb->asoc.strreset_timer;
2289 		break;
2290 
2291 	case SCTP_TIMER_TYPE_INPKILL:
2292 		/*
2293 		 * The inp is set up to die. We re-use the signature_change
2294 		 * timer since that has stopped and we are in the GONE
2295 		 * state.
2296 		 */
2297 		tmr = &inp->sctp_ep.signature_change;
2298 		break;
2299 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2300 		if ((stcb == NULL) || (net == NULL)) {
2301 			return;
2302 		}
2303 		tmr = &net->pmtu_timer;
2304 		break;
2305 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2306 		if ((stcb == NULL) || (net == NULL)) {
2307 			return;
2308 		}
2309 		tmr = &net->rxt_timer;
2310 		break;
2311 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2312 		if (stcb == NULL) {
2313 			return;
2314 		}
2315 		tmr = &stcb->asoc.shut_guard_timer;
2316 		break;
2317 	case SCTP_TIMER_TYPE_STRRESET:
2318 		if (stcb == NULL) {
2319 			return;
2320 		}
2321 		tmr = &stcb->asoc.strreset_timer;
2322 		break;
2323 	case SCTP_TIMER_TYPE_ASCONF:
2324 		if (stcb == NULL) {
2325 			return;
2326 		}
2327 		tmr = &stcb->asoc.asconf_timer;
2328 		break;
2329 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2330 		if (stcb == NULL) {
2331 			return;
2332 		}
2333 		tmr = &stcb->asoc.delete_prim_timer;
2334 		break;
2335 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2336 		if (stcb == NULL) {
2337 			return;
2338 		}
2339 		tmr = &stcb->asoc.autoclose_timer;
2340 		break;
2341 	default:
2342 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2343 		    __func__, t_type);
2344 		break;
2345 	}
2346 	if (tmr == NULL) {
2347 		return;
2348 	}
2349 	if ((tmr->type != t_type) && tmr->type) {
2350 		/*
2351 		 * OK, we have a timer that is under joint use, e.g. the
2352 		 * cookie timer sharing storage with the SEND timer. We are
2353 		 * therefore NOT running the timer that the caller wants
2354 		 * stopped, so just return.
2355 		 */
2356 		return;
2357 	}
2358 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2359 		stcb->asoc.num_send_timers_up--;
2360 		if (stcb->asoc.num_send_timers_up < 0) {
2361 			stcb->asoc.num_send_timers_up = 0;
2362 		}
2363 	}
2364 	tmr->self = NULL;
2365 	tmr->stopped_from = from;
2366 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2367 	return;
2368 }
2369 
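/*
 * Walk an mbuf chain and return the total number of data bytes it holds.
 */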
2370 uint32_t
2371 sctp_calculate_len(struct mbuf *m)
2372 {
2373 	uint32_t tlen = 0;
2374 	struct mbuf *at;
2375 
2376 	at = m;
2377 	while (at) {
2378 		tlen += SCTP_BUF_LEN(at);
2379 		at = SCTP_BUF_NEXT(at);
2380 	}
2381 	return (tlen);
2382 }
2383 
2384 void
2385 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2386     struct sctp_association *asoc, uint32_t mtu)
2387 {
2388 	/*
2389 	 * Reset the P-MTU size on this association. This involves changing
2390 	 * the asoc MTU and going through ANY chunk+overhead larger than mtu
2391 	 * to allow the DF flag to be cleared.
2392 	 */
2393 	struct sctp_tmit_chunk *chk;
2394 	unsigned int eff_mtu, ovh;
2395 
2396 	asoc->smallest_mtu = mtu;
2397 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2398 		ovh = SCTP_MIN_OVERHEAD;
2399 	} else {
2400 		ovh = SCTP_MIN_V4_OVERHEAD;
2401 	}
2402 	eff_mtu = mtu - ovh;
2403 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2404 		if (chk->send_size > eff_mtu) {
2405 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2406 		}
2407 	}
2408 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2409 		if (chk->send_size > eff_mtu) {
2410 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2411 		}
2412 	}
2413 }
2414 
2415 
2416 /*
2417  * Given an association and the starting time of the current RTT period,
2418  * return the RTO in number of msecs. net should point to the current network.
2419  */
2420 
2421 uint32_t
2422 sctp_calculate_rto(struct sctp_tcb *stcb,
2423     struct sctp_association *asoc,
2424     struct sctp_nets *net,
2425     struct timeval *old,
2426     int rtt_from_sack)
2427 {
2428 	/*-
2429 	 * given an association and the starting time of the current RTT
2430 	 * period (in *old), return the RTO in number of msecs.
2431 	 */
2432 	int32_t rtt;		/* RTT in ms */
2433 	uint32_t new_rto;
2434 	int first_measure = 0;
2435 	struct timeval now;
2436 
2437 	/************************/
2438 	/* 1. calculate new RTT */
2439 	/************************/
2440 	/* get the current time */
2441 	if (stcb->asoc.use_precise_time) {
2442 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2443 	} else {
2444 		(void)SCTP_GETTIME_TIMEVAL(&now);
2445 	}
2446 	timevalsub(&now, old);
2447 	/* store the current RTT in us */
2448 	net->rtt = (uint64_t)1000000 * (uint64_t)now.tv_sec +
2449 	    (uint64_t)now.tv_usec;
2450 
2451 	/* compute rtt in ms */
2452 	rtt = (int32_t)(net->rtt / 1000);
2453 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2454 		/*
2455 		 * Tell the CC module that a new update has just occurred
2456 		 * from a sack
2457 		 */
2458 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2459 	}
2460 	/*
2461 	 * Do we need to determine the LAN type? We do this only for SACKs,
2462 	 * i.e. RTT determined from data, not non-data (HB/INIT->INITACK).
2463 	 */
2464 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2465 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2466 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2467 			net->lan_type = SCTP_LAN_INTERNET;
2468 		} else {
2469 			net->lan_type = SCTP_LAN_LOCAL;
2470 		}
2471 	}
2472 	/***************************/
2473 	/* 2. update RTTVAR & SRTT */
2474 	/***************************/
2475 	/*-
2476 	 * Compute the scaled average lastsa and the
2477 	 * scaled variance lastsv as described in Van Jacobson's
2478 	 * paper "Congestion Avoidance and Control", Annex A.
2479 	 *
2480 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2481 	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
2482 	 */
2483 	if (net->RTO_measured) {
2484 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2485 		net->lastsa += rtt;
2486 		if (rtt < 0) {
2487 			rtt = -rtt;
2488 		}
2489 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2490 		net->lastsv += rtt;
2491 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2492 			rto_logging(net, SCTP_LOG_RTTVAR);
2493 		}
2494 	} else {
2495 		/* First RTO measurement */
2496 		net->RTO_measured = 1;
2497 		first_measure = 1;
2498 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2499 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2500 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2501 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2502 		}
2503 	}
2504 	if (net->lastsv == 0) {
2505 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2506 	}
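	/*
	 * lastsa holds SRTT << SCTP_RTT_SHIFT and lastsv holds
	 * RTTVAR << SCTP_RTT_VAR_SHIFT, so with SCTP_RTT_VAR_SHIFT == 2 the
	 * sum below is the classic RTO = SRTT + 4 * RTTVAR from RFC 4960,
	 * Section 6.3.1.
	 */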
2507 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
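	/*
	 * Flag high-delay (satellite-like) paths: once the computed RTO
	 * exceeds SCTP_SAT_NETWORK_MIN the association is marked as being on
	 * a satellite network; if it later drops back, the flag is cleared
	 * and locked out so it is not toggled repeatedly.
	 */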
2508 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2509 	    (stcb->asoc.sat_network_lockout == 0)) {
2510 		stcb->asoc.sat_network = 1;
2511 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2512 		stcb->asoc.sat_network = 0;
2513 		stcb->asoc.sat_network_lockout = 1;
2514 	}
2515 	/* bound it, per C6/C7 in Section 5.3.1 */
2516 	if (new_rto < stcb->asoc.minrto) {
2517 		new_rto = stcb->asoc.minrto;
2518 	}
2519 	if (new_rto > stcb->asoc.maxrto) {
2520 		new_rto = stcb->asoc.maxrto;
2521 	}
2522 	/* we are now returning the RTO */
2523 	return (new_rto);
2524 }
2525 
2526 /*
2527  * Return a pointer to a contiguous piece of data from the given mbuf chain,
2528  * starting at 'off', for 'len' bytes.  If the desired piece spans more than
2529  * one mbuf, a copy is made into 'in_ptr'; the caller must ensure that buffer
2530  * is >= 'len' bytes.  Returns NULL if there aren't 'len' bytes in the chain.
2531  */
2532 caddr_t
2533 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t *in_ptr)
2534 {
2535 	uint32_t count;
2536 	uint8_t *ptr;
2537 
2538 	ptr = in_ptr;
2539 	if ((off < 0) || (len <= 0))
2540 		return (NULL);
2541 
2542 	/* find the desired start location */
2543 	while ((m != NULL) && (off > 0)) {
2544 		if (off < SCTP_BUF_LEN(m))
2545 			break;
2546 		off -= SCTP_BUF_LEN(m);
2547 		m = SCTP_BUF_NEXT(m);
2548 	}
2549 	if (m == NULL)
2550 		return (NULL);
2551 
2552 	/* is the current mbuf large enough (i.e. contiguous)? */
2553 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2554 		return (mtod(m, caddr_t)+off);
2555 	} else {
2556 		/* else, it spans more than one mbuf, so save a temp copy... */
2557 		while ((m != NULL) && (len > 0)) {
2558 			count = min(SCTP_BUF_LEN(m) - off, len);
2559 			memcpy(ptr, mtod(m, caddr_t)+off, count);
2560 			len -= count;
2561 			ptr += count;
2562 			off = 0;
2563 			m = SCTP_BUF_NEXT(m);
2564 		}
2565 		if ((m == NULL) && (len > 0))
2566 			return (NULL);
2567 		else
2568 			return ((caddr_t)in_ptr);
2569 	}
2570 }
2571 
2572 
2573 
2574 struct sctp_paramhdr *
2575 sctp_get_next_param(struct mbuf *m,
2576     int offset,
2577     struct sctp_paramhdr *pull,
2578     int pull_limit)
2579 {
2580 	/* This just provides a typed signature to Peter's Pull routine */
2581 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2582 	    (uint8_t *)pull));
2583 }
2584 
2585 
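/*
 * Append up to three bytes of zero padding to the given mbuf. If the mbuf
 * has no trailing space, a new mbuf is allocated and chained behind it; the
 * mbuf that received the padding is returned (NULL on failure).
 */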
2586 struct mbuf *
2587 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2588 {
2589 	struct mbuf *m_last;
2590 	caddr_t dp;
2591 
2592 	if (padlen > 3) {
2593 		return (NULL);
2594 	}
2595 	if (padlen <= M_TRAILINGSPACE(m)) {
2596 		/*
2597 		 * The easy way. We hope the majority of the time we hit
2598 		 * here :)
2599 		 */
2600 		m_last = m;
2601 	} else {
2602 		/* Hard way we must grow the mbuf chain */
2603 		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2604 		if (m_last == NULL) {
2605 			return (NULL);
2606 		}
2607 		SCTP_BUF_LEN(m_last) = 0;
2608 		SCTP_BUF_NEXT(m_last) = NULL;
2609 		SCTP_BUF_NEXT(m) = m_last;
2610 	}
2611 	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2612 	SCTP_BUF_LEN(m_last) += padlen;
2613 	memset(dp, 0, padlen);
2614 	return (m_last);
2615 }
2616 
2617 struct mbuf *
2618 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2619 {
2620 	/* find the last mbuf in chain and pad it */
2621 	struct mbuf *m_at;
2622 
2623 	if (last_mbuf != NULL) {
2624 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2625 	} else {
2626 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2627 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2628 				return (sctp_add_pad_tombuf(m_at, padval));
2629 			}
2630 		}
2631 	}
2632 	return (NULL);
2633 }
2634 
2635 static void
2636 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2637     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2638 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2639     SCTP_UNUSED
2640 #endif
2641 )
2642 {
2643 	struct mbuf *m_notify;
2644 	struct sctp_assoc_change *sac;
2645 	struct sctp_queued_to_read *control;
2646 	unsigned int notif_len;
2647 	uint16_t abort_len;
2648 	unsigned int i;
2649 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2650 	struct socket *so;
2651 #endif
2652 
2653 	if (stcb == NULL) {
2654 		return;
2655 	}
2656 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2657 		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2658 		if (abort != NULL) {
2659 			abort_len = ntohs(abort->ch.chunk_length);
2660 		} else {
2661 			abort_len = 0;
2662 		}
2663 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2664 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2665 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2666 			notif_len += abort_len;
2667 		}
2668 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2669 		if (m_notify == NULL) {
2670 			/* Retry with smaller value. */
2671 			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
2672 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
2673 			if (m_notify == NULL) {
2674 				goto set_error;
2675 			}
2676 		}
2677 		SCTP_BUF_NEXT(m_notify) = NULL;
2678 		sac = mtod(m_notify, struct sctp_assoc_change *);
2679 		memset(sac, 0, notif_len);
2680 		sac->sac_type = SCTP_ASSOC_CHANGE;
2681 		sac->sac_flags = 0;
2682 		sac->sac_length = sizeof(struct sctp_assoc_change);
2683 		sac->sac_state = state;
2684 		sac->sac_error = error;
2685 		/* XXX verify these stream counts */
2686 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2687 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2688 		sac->sac_assoc_id = sctp_get_associd(stcb);
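		/*
		 * For COMM_UP/RESTART, sac_info is filled with one byte per
		 * supported feature and sac_length grows accordingly; for
		 * COMM_LOST/CANT_STR_ASSOC, the received ABORT chunk (if any)
		 * is copied in instead.
		 */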
2689 		if (notif_len > sizeof(struct sctp_assoc_change)) {
2690 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2691 				i = 0;
2692 				if (stcb->asoc.prsctp_supported == 1) {
2693 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2694 				}
2695 				if (stcb->asoc.auth_supported == 1) {
2696 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2697 				}
2698 				if (stcb->asoc.asconf_supported == 1) {
2699 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2700 				}
2701 				if (stcb->asoc.idata_supported == 1) {
2702 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
2703 				}
2704 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2705 				if (stcb->asoc.reconfig_supported == 1) {
2706 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2707 				}
2708 				sac->sac_length += i;
2709 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2710 				memcpy(sac->sac_info, abort, abort_len);
2711 				sac->sac_length += abort_len;
2712 			}
2713 		}
2714 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2715 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2716 		    0, 0, stcb->asoc.context, 0, 0, 0,
2717 		    m_notify);
2718 		if (control != NULL) {
2719 			control->length = SCTP_BUF_LEN(m_notify);
2720 			control->spec_flags = M_NOTIFICATION;
2721 			/* not that we need this */
2722 			control->tail_mbuf = m_notify;
2723 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2724 			    control,
2725 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2726 			    so_locked);
2727 		} else {
2728 			sctp_m_freem(m_notify);
2729 		}
2730 	}
2731 	/*
2732 	 * For 1-to-1 style sockets, we send up an error when an ABORT
2733 	 * comes in.
2734 	 */
2735 set_error:
2736 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2737 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2738 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2739 		SOCK_LOCK(stcb->sctp_socket);
2740 		if (from_peer) {
2741 			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2742 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2743 				stcb->sctp_socket->so_error = ECONNREFUSED;
2744 			} else {
2745 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2746 				stcb->sctp_socket->so_error = ECONNRESET;
2747 			}
2748 		} else {
2749 			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
2750 			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
2751 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
2752 				stcb->sctp_socket->so_error = ETIMEDOUT;
2753 			} else {
2754 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2755 				stcb->sctp_socket->so_error = ECONNABORTED;
2756 			}
2757 		}
2758 		SOCK_UNLOCK(stcb->sctp_socket);
2759 	}
2760 	/* Wake ANY sleepers */
2761 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2762 	so = SCTP_INP_SO(stcb->sctp_ep);
2763 	if (!so_locked) {
2764 		atomic_add_int(&stcb->asoc.refcnt, 1);
2765 		SCTP_TCB_UNLOCK(stcb);
2766 		SCTP_SOCKET_LOCK(so, 1);
2767 		SCTP_TCB_LOCK(stcb);
2768 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2769 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2770 			SCTP_SOCKET_UNLOCK(so, 1);
2771 			return;
2772 		}
2773 	}
2774 #endif
2775 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2776 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2777 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2778 		socantrcvmore(stcb->sctp_socket);
2779 	}
2780 	sorwakeup(stcb->sctp_socket);
2781 	sowwakeup(stcb->sctp_socket);
2782 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2783 	if (!so_locked) {
2784 		SCTP_SOCKET_UNLOCK(so, 1);
2785 	}
2786 #endif
2787 }
2788 
2789 static void
2790 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2791     struct sockaddr *sa, uint32_t error, int so_locked
2792 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2793     SCTP_UNUSED
2794 #endif
2795 )
2796 {
2797 	struct mbuf *m_notify;
2798 	struct sctp_paddr_change *spc;
2799 	struct sctp_queued_to_read *control;
2800 
2801 	if ((stcb == NULL) ||
2802 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2803 		/* event not enabled */
2804 		return;
2805 	}
2806 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
2807 	if (m_notify == NULL)
2808 		return;
2809 	SCTP_BUF_LEN(m_notify) = 0;
2810 	spc = mtod(m_notify, struct sctp_paddr_change *);
2811 	memset(spc, 0, sizeof(struct sctp_paddr_change));
2812 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2813 	spc->spc_flags = 0;
2814 	spc->spc_length = sizeof(struct sctp_paddr_change);
2815 	switch (sa->sa_family) {
2816 #ifdef INET
2817 	case AF_INET:
2818 #ifdef INET6
2819 		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
2820 			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
2821 			    (struct sockaddr_in6 *)&spc->spc_aaddr);
2822 		} else {
2823 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2824 		}
2825 #else
2826 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2827 #endif
2828 		break;
2829 #endif
2830 #ifdef INET6
2831 	case AF_INET6:
2832 		{
2833 			struct sockaddr_in6 *sin6;
2834 
2835 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2836 
2837 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2838 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2839 				if (sin6->sin6_scope_id == 0) {
2840 					/* recover scope_id for user */
2841 					(void)sa6_recoverscope(sin6);
2842 				} else {
2843 					/* clear embedded scope_id for user */
2844 					in6_clearscope(&sin6->sin6_addr);
2845 				}
2846 			}
2847 			break;
2848 		}
2849 #endif
2850 	default:
2851 		/* TSNH */
2852 		break;
2853 	}
2854 	spc->spc_state = state;
2855 	spc->spc_error = error;
2856 	spc->spc_assoc_id = sctp_get_associd(stcb);
2857 
2858 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2859 	SCTP_BUF_NEXT(m_notify) = NULL;
2860 
2861 	/* append to socket */
2862 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2863 	    0, 0, stcb->asoc.context, 0, 0, 0,
2864 	    m_notify);
2865 	if (control == NULL) {
2866 		/* no memory */
2867 		sctp_m_freem(m_notify);
2868 		return;
2869 	}
2870 	control->length = SCTP_BUF_LEN(m_notify);
2871 	control->spec_flags = M_NOTIFICATION;
2872 	/* not that we need this */
2873 	control->tail_mbuf = m_notify;
2874 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2875 	    control,
2876 	    &stcb->sctp_socket->so_rcv, 1,
2877 	    SCTP_READ_LOCK_NOT_HELD,
2878 	    so_locked);
2879 }
2880 
2881 
2882 static void
2883 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2884     struct sctp_tmit_chunk *chk, int so_locked
2885 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2886     SCTP_UNUSED
2887 #endif
2888 )
2889 {
2890 	struct mbuf *m_notify;
2891 	struct sctp_send_failed *ssf;
2892 	struct sctp_send_failed_event *ssfe;
2893 	struct sctp_queued_to_read *control;
2894 	struct sctp_chunkhdr *chkhdr;
2895 	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;
2896 
2897 	if ((stcb == NULL) ||
2898 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2899 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2900 		/* event not enabled */
2901 		return;
2902 	}
2903 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2904 		notifhdr_len = sizeof(struct sctp_send_failed_event);
2905 	} else {
2906 		notifhdr_len = sizeof(struct sctp_send_failed);
2907 	}
2908 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
2909 	if (m_notify == NULL)
2910 		/* no space left */
2911 		return;
2912 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
2913 	if (stcb->asoc.idata_supported) {
2914 		chkhdr_len = sizeof(struct sctp_idata_chunk);
2915 	} else {
2916 		chkhdr_len = sizeof(struct sctp_data_chunk);
2917 	}
2918 	/* Use some defaults in case we can't access the chunk header */
2919 	if (chk->send_size >= chkhdr_len) {
2920 		payload_len = chk->send_size - chkhdr_len;
2921 	} else {
2922 		payload_len = 0;
2923 	}
2924 	padding_len = 0;
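	/*
	 * If the chunk header is readable, refine payload_len and padding_len
	 * from the actual chunk length; any difference between send_size and
	 * the chunk length (less than 4 bytes) is trailing padding.
	 */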
2925 	if (chk->data != NULL) {
2926 		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
2927 		if (chkhdr != NULL) {
2928 			chk_len = ntohs(chkhdr->chunk_length);
2929 			if ((chk_len >= chkhdr_len) &&
2930 			    (chk->send_size >= chk_len) &&
2931 			    (chk->send_size - chk_len < 4)) {
2932 				padding_len = chk->send_size - chk_len;
2933 				payload_len = chk->send_size - chkhdr_len - padding_len;
2934 			}
2935 		}
2936 	}
2937 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2938 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2939 		memset(ssfe, 0, notifhdr_len);
2940 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2941 		if (sent) {
2942 			ssfe->ssfe_flags = SCTP_DATA_SENT;
2943 		} else {
2944 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2945 		}
2946 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
2947 		ssfe->ssfe_error = error;
2948 		/* not exactly what the user sent in, but should be close :) */
2949 		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
2950 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2951 		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
2952 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2953 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2954 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2955 	} else {
2956 		ssf = mtod(m_notify, struct sctp_send_failed *);
2957 		memset(ssf, 0, notifhdr_len);
2958 		ssf->ssf_type = SCTP_SEND_FAILED;
2959 		if (sent) {
2960 			ssf->ssf_flags = SCTP_DATA_SENT;
2961 		} else {
2962 			ssf->ssf_flags = SCTP_DATA_UNSENT;
2963 		}
2964 		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
2965 		ssf->ssf_error = error;
2966 		/* not exactly what the user sent in, but should be close :) */
2967 		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
2968 		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
2969 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
2970 		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
2971 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
2972 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2973 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
2974 	}
2975 	if (chk->data != NULL) {
2976 		/* Trim off the sctp chunk header (it should be there) */
2977 		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
2978 			m_adj(chk->data, chkhdr_len);
2979 			m_adj(chk->data, -padding_len);
2980 			sctp_mbuf_crush(chk->data);
2981 			chk->send_size -= (chkhdr_len + padding_len);
2982 		}
2983 	}
2984 	SCTP_BUF_NEXT(m_notify) = chk->data;
2985 	/* Steal off the mbuf */
2986 	chk->data = NULL;
2987 	/*
2988 	 * For this case, we check the actual socket buffer: since the assoc
2989 	 * is going away, we don't want to overfill the socket buffer for a
2990 	 * non-reader.
2991 	 */
2992 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
2993 		sctp_m_freem(m_notify);
2994 		return;
2995 	}
2996 	/* append to socket */
2997 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2998 	    0, 0, stcb->asoc.context, 0, 0, 0,
2999 	    m_notify);
3000 	if (control == NULL) {
3001 		/* no memory */
3002 		sctp_m_freem(m_notify);
3003 		return;
3004 	}
3005 	control->length = SCTP_BUF_LEN(m_notify);
3006 	control->spec_flags = M_NOTIFICATION;
3007 	/* not that we need this */
3008 	control->tail_mbuf = m_notify;
3009 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3010 	    control,
3011 	    &stcb->sctp_socket->so_rcv, 1,
3012 	    SCTP_READ_LOCK_NOT_HELD,
3013 	    so_locked);
3014 }
3015 
3016 
3017 static void
3018 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3019     struct sctp_stream_queue_pending *sp, int so_locked
3020 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3021     SCTP_UNUSED
3022 #endif
3023 )
3024 {
3025 	struct mbuf *m_notify;
3026 	struct sctp_send_failed *ssf;
3027 	struct sctp_send_failed_event *ssfe;
3028 	struct sctp_queued_to_read *control;
3029 	int notifhdr_len;
3030 
3031 	if ((stcb == NULL) ||
3032 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
3033 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
3034 		/* event not enabled */
3035 		return;
3036 	}
3037 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3038 		notifhdr_len = sizeof(struct sctp_send_failed_event);
3039 	} else {
3040 		notifhdr_len = sizeof(struct sctp_send_failed);
3041 	}
3042 	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
3043 	if (m_notify == NULL) {
3044 		/* no space left */
3045 		return;
3046 	}
3047 	SCTP_BUF_LEN(m_notify) = notifhdr_len;
3048 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
3049 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
3050 		memset(ssfe, 0, notifhdr_len);
3051 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
3052 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
3053 		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
3054 		ssfe->ssfe_error = error;
3055 		/* not exactly what the user sent in, but should be close :) */
3056 		ssfe->ssfe_info.snd_sid = sp->sid;
3057 		if (sp->some_taken) {
3058 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
3059 		} else {
3060 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
3061 		}
3062 		ssfe->ssfe_info.snd_ppid = sp->ppid;
3063 		ssfe->ssfe_info.snd_context = sp->context;
3064 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
3065 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
3066 	} else {
3067 		ssf = mtod(m_notify, struct sctp_send_failed *);
3068 		memset(ssf, 0, notifhdr_len);
3069 		ssf->ssf_type = SCTP_SEND_FAILED;
3070 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3071 		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
3072 		ssf->ssf_error = error;
3073 		/* not exactly what the user sent in, but should be close :) */
3074 		ssf->ssf_info.sinfo_stream = sp->sid;
3075 		ssf->ssf_info.sinfo_ssn = 0;
3076 		if (sp->some_taken) {
3077 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3078 		} else {
3079 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3080 		}
3081 		ssf->ssf_info.sinfo_ppid = sp->ppid;
3082 		ssf->ssf_info.sinfo_context = sp->context;
3083 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3084 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
3085 	}
3086 	SCTP_BUF_NEXT(m_notify) = sp->data;
3087 
3088 	/* Steal off the mbuf */
3089 	sp->data = NULL;
3090 	/*
3091 	 * For this case, we check the actual socket buffer: since the assoc
3092 	 * is going away, we don't want to overfill the socket buffer for a
3093 	 * non-reader.
3094 	 */
3095 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3096 		sctp_m_freem(m_notify);
3097 		return;
3098 	}
3099 	/* append to socket */
3100 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3101 	    0, 0, stcb->asoc.context, 0, 0, 0,
3102 	    m_notify);
3103 	if (control == NULL) {
3104 		/* no memory */
3105 		sctp_m_freem(m_notify);
3106 		return;
3107 	}
3108 	control->length = SCTP_BUF_LEN(m_notify);
3109 	control->spec_flags = M_NOTIFICATION;
3110 	/* not that we need this */
3111 	control->tail_mbuf = m_notify;
3112 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3113 	    control,
3114 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3115 }
3116 
3117 
3118 
3119 static void
3120 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3121 {
3122 	struct mbuf *m_notify;
3123 	struct sctp_adaptation_event *sai;
3124 	struct sctp_queued_to_read *control;
3125 
3126 	if ((stcb == NULL) ||
3127 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3128 		/* event not enabled */
3129 		return;
3130 	}
3131 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaptation_event), 0, M_NOWAIT, 1, MT_DATA);
3132 	if (m_notify == NULL)
3133 		/* no space left */
3134 		return;
3135 	SCTP_BUF_LEN(m_notify) = 0;
3136 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3137 	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3138 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3139 	sai->sai_flags = 0;
3140 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3141 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3142 	sai->sai_assoc_id = sctp_get_associd(stcb);
3143 
3144 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3145 	SCTP_BUF_NEXT(m_notify) = NULL;
3146 
3147 	/* append to socket */
3148 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3149 	    0, 0, stcb->asoc.context, 0, 0, 0,
3150 	    m_notify);
3151 	if (control == NULL) {
3152 		/* no memory */
3153 		sctp_m_freem(m_notify);
3154 		return;
3155 	}
3156 	control->length = SCTP_BUF_LEN(m_notify);
3157 	control->spec_flags = M_NOTIFICATION;
3158 	/* not that we need this */
3159 	control->tail_mbuf = m_notify;
3160 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3161 	    control,
3162 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3163 }
3164 
3165 /* This must always be called with the read-queue LOCKED in the INP */
3166 static void
3167 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3168     uint32_t val, int so_locked
3169 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3170     SCTP_UNUSED
3171 #endif
3172 )
3173 {
3174 	struct mbuf *m_notify;
3175 	struct sctp_pdapi_event *pdapi;
3176 	struct sctp_queued_to_read *control;
3177 	struct sockbuf *sb;
3178 
3179 	if ((stcb == NULL) ||
3180 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3181 		/* event not enabled */
3182 		return;
3183 	}
3184 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3185 		return;
3186 	}
3187 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
3188 	if (m_notify == NULL)
3189 		/* no space left */
3190 		return;
3191 	SCTP_BUF_LEN(m_notify) = 0;
3192 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3193 	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
3194 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3195 	pdapi->pdapi_flags = 0;
3196 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3197 	pdapi->pdapi_indication = error;
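	/*
	 * 'val' packs the stream id in its upper 16 bits and the stream
	 * sequence number in its lower 16 bits.
	 */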
3198 	pdapi->pdapi_stream = (val >> 16);
3199 	pdapi->pdapi_seq = (val & 0x0000ffff);
3200 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3201 
3202 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3203 	SCTP_BUF_NEXT(m_notify) = NULL;
3204 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3205 	    0, 0, stcb->asoc.context, 0, 0, 0,
3206 	    m_notify);
3207 	if (control == NULL) {
3208 		/* no memory */
3209 		sctp_m_freem(m_notify);
3210 		return;
3211 	}
3212 	control->length = SCTP_BUF_LEN(m_notify);
3213 	control->spec_flags = M_NOTIFICATION;
3214 	/* not that we need this */
3215 	control->tail_mbuf = m_notify;
3216 	sb = &stcb->sctp_socket->so_rcv;
3217 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3218 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3219 	}
3220 	sctp_sballoc(stcb, sb, m_notify);
3221 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3222 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3223 	}
3224 	control->end_added = 1;
3225 	if (stcb->asoc.control_pdapi)
3226 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3227 	else {
3228 		/* we really should not see this case */
3229 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3230 	}
3231 	if (stcb->sctp_ep && stcb->sctp_socket) {
3232 		/* This should always be the case */
3233 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3234 		struct socket *so;
3235 
3236 		so = SCTP_INP_SO(stcb->sctp_ep);
3237 		if (!so_locked) {
3238 			atomic_add_int(&stcb->asoc.refcnt, 1);
3239 			SCTP_TCB_UNLOCK(stcb);
3240 			SCTP_SOCKET_LOCK(so, 1);
3241 			SCTP_TCB_LOCK(stcb);
3242 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3243 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3244 				SCTP_SOCKET_UNLOCK(so, 1);
3245 				return;
3246 			}
3247 		}
3248 #endif
3249 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3250 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3251 		if (!so_locked) {
3252 			SCTP_SOCKET_UNLOCK(so, 1);
3253 		}
3254 #endif
3255 	}
3256 }
3257 
3258 static void
3259 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3260 {
3261 	struct mbuf *m_notify;
3262 	struct sctp_shutdown_event *sse;
3263 	struct sctp_queued_to_read *control;
3264 
3265 	/*
3266 	 * For the TCP model AND UDP connected sockets, we will send an
3267 	 * error up when a SHUTDOWN completes.
3268 	 */
3269 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3270 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3271 		/* mark socket closed for read/write and wakeup! */
3272 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3273 		struct socket *so;
3274 
3275 		so = SCTP_INP_SO(stcb->sctp_ep);
3276 		atomic_add_int(&stcb->asoc.refcnt, 1);
3277 		SCTP_TCB_UNLOCK(stcb);
3278 		SCTP_SOCKET_LOCK(so, 1);
3279 		SCTP_TCB_LOCK(stcb);
3280 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3281 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3282 			SCTP_SOCKET_UNLOCK(so, 1);
3283 			return;
3284 		}
3285 #endif
3286 		socantsendmore(stcb->sctp_socket);
3287 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3288 		SCTP_SOCKET_UNLOCK(so, 1);
3289 #endif
3290 	}
3291 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3292 		/* event not enabled */
3293 		return;
3294 	}
3295 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
3296 	if (m_notify == NULL)
3297 		/* no space left */
3298 		return;
3299 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3300 	memset(sse, 0, sizeof(struct sctp_shutdown_event));
3301 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3302 	sse->sse_flags = 0;
3303 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3304 	sse->sse_assoc_id = sctp_get_associd(stcb);
3305 
3306 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3307 	SCTP_BUF_NEXT(m_notify) = NULL;
3308 
3309 	/* append to socket */
3310 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3311 	    0, 0, stcb->asoc.context, 0, 0, 0,
3312 	    m_notify);
3313 	if (control == NULL) {
3314 		/* no memory */
3315 		sctp_m_freem(m_notify);
3316 		return;
3317 	}
3318 	control->length = SCTP_BUF_LEN(m_notify);
3319 	control->spec_flags = M_NOTIFICATION;
3320 	/* not that we need this */
3321 	control->tail_mbuf = m_notify;
3322 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3323 	    control,
3324 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3325 }
3326 
3327 static void
3328 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3329     int so_locked
3330 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3331     SCTP_UNUSED
3332 #endif
3333 )
3334 {
3335 	struct mbuf *m_notify;
3336 	struct sctp_sender_dry_event *event;
3337 	struct sctp_queued_to_read *control;
3338 
3339 	if ((stcb == NULL) ||
3340 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3341 		/* event not enabled */
3342 		return;
3343 	}
3344 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3345 	if (m_notify == NULL) {
3346 		/* no space left */
3347 		return;
3348 	}
3349 	SCTP_BUF_LEN(m_notify) = 0;
3350 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3351 	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3352 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3353 	event->sender_dry_flags = 0;
3354 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3355 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3356 
3357 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3358 	SCTP_BUF_NEXT(m_notify) = NULL;
3359 
3360 	/* append to socket */
3361 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3362 	    0, 0, stcb->asoc.context, 0, 0, 0,
3363 	    m_notify);
3364 	if (control == NULL) {
3365 		/* no memory */
3366 		sctp_m_freem(m_notify);
3367 		return;
3368 	}
3369 	control->length = SCTP_BUF_LEN(m_notify);
3370 	control->spec_flags = M_NOTIFICATION;
3371 	/* not that we need this */
3372 	control->tail_mbuf = m_notify;
3373 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3374 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3375 }
3376 
3377 
3378 void
3379 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3380 {
3381 	struct mbuf *m_notify;
3382 	struct sctp_queued_to_read *control;
3383 	struct sctp_stream_change_event *stradd;
3384 
3385 	if ((stcb == NULL) ||
3386 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3387 		/* event not enabled */
3388 		return;
3389 	}
3390 	if ((stcb->asoc.peer_req_out) && flag) {
3391 		/* Peer made the request, don't tell the local user */
3392 		stcb->asoc.peer_req_out = 0;
3393 		return;
3394 	}
3395 	stcb->asoc.peer_req_out = 0;
3396 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3397 	if (m_notify == NULL)
3398 		/* no space left */
3399 		return;
3400 	SCTP_BUF_LEN(m_notify) = 0;
3401 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3402 	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3403 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3404 	stradd->strchange_flags = flag;
3405 	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3406 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3407 	stradd->strchange_instrms = numberin;
3408 	stradd->strchange_outstrms = numberout;
3409 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3410 	SCTP_BUF_NEXT(m_notify) = NULL;
3411 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3412 		/* no space */
3413 		sctp_m_freem(m_notify);
3414 		return;
3415 	}
3416 	/* append to socket */
3417 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3418 	    0, 0, stcb->asoc.context, 0, 0, 0,
3419 	    m_notify);
3420 	if (control == NULL) {
3421 		/* no memory */
3422 		sctp_m_freem(m_notify);
3423 		return;
3424 	}
3425 	control->length = SCTP_BUF_LEN(m_notify);
3426 	control->spec_flags = M_NOTIFICATION;
3427 	/* not that we need this */
3428 	control->tail_mbuf = m_notify;
3429 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3430 	    control,
3431 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3432 }
3433 
3434 void
3435 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3436 {
3437 	struct mbuf *m_notify;
3438 	struct sctp_queued_to_read *control;
3439 	struct sctp_assoc_reset_event *strasoc;
3440 
3441 	if ((stcb == NULL) ||
3442 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3443 		/* event not enabled */
3444 		return;
3445 	}
3446 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3447 	if (m_notify == NULL)
3448 		/* no space left */
3449 		return;
3450 	SCTP_BUF_LEN(m_notify) = 0;
3451 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3452 	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3453 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3454 	strasoc->assocreset_flags = flag;
3455 	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3456 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3457 	strasoc->assocreset_local_tsn = sending_tsn;
3458 	strasoc->assocreset_remote_tsn = recv_tsn;
3459 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3460 	SCTP_BUF_NEXT(m_notify) = NULL;
3461 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3462 		/* no space */
3463 		sctp_m_freem(m_notify);
3464 		return;
3465 	}
3466 	/* append to socket */
3467 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3468 	    0, 0, stcb->asoc.context, 0, 0, 0,
3469 	    m_notify);
3470 	if (control == NULL) {
3471 		/* no memory */
3472 		sctp_m_freem(m_notify);
3473 		return;
3474 	}
3475 	control->length = SCTP_BUF_LEN(m_notify);
3476 	control->spec_flags = M_NOTIFICATION;
3477 	/* not that we need this */
3478 	control->tail_mbuf = m_notify;
3479 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3480 	    control,
3481 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3482 }
3483 
3484 
3485 
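/*
 * Builds an SCTP_STREAM_RESET_EVENT notification for the given list of
 * stream ids (converted from network to host byte order), with 'flag'
 * describing direction and outcome (e.g. SCTP_STREAM_RESET_FAILED or
 * SCTP_STREAM_RESET_DENIED), and appends it to the socket's read queue
 * if space permits.
 */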
3486 static void
3487 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3488     int number_entries, uint16_t *list, int flag)
3489 {
3490 	struct mbuf *m_notify;
3491 	struct sctp_queued_to_read *control;
3492 	struct sctp_stream_reset_event *strreset;
3493 	int len;
3494 
3495 	if ((stcb == NULL) ||
3496 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3497 		/* event not enabled */
3498 		return;
3499 	}
3500 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
3501 	if (m_notify == NULL)
3502 		/* no space left */
3503 		return;
3504 	SCTP_BUF_LEN(m_notify) = 0;
3505 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3506 	if (len > M_TRAILINGSPACE(m_notify)) {
3507 		/* never enough room */
3508 		sctp_m_freem(m_notify);
3509 		return;
3510 	}
3511 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3512 	memset(strreset, 0, len);
3513 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3514 	strreset->strreset_flags = flag;
3515 	strreset->strreset_length = len;
3516 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3517 	if (number_entries) {
3518 		int i;
3519 
3520 		for (i = 0; i < number_entries; i++) {
3521 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3522 		}
3523 	}
3524 	SCTP_BUF_LEN(m_notify) = len;
3525 	SCTP_BUF_NEXT(m_notify) = NULL;
3526 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3527 		/* no space */
3528 		sctp_m_freem(m_notify);
3529 		return;
3530 	}
3531 	/* append to socket */
3532 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3533 	    0, 0, stcb->asoc.context, 0, 0, 0,
3534 	    m_notify);
3535 	if (control == NULL) {
3536 		/* no memory */
3537 		sctp_m_freem(m_notify);
3538 		return;
3539 	}
3540 	control->length = SCTP_BUF_LEN(m_notify);
3541 	control->spec_flags = M_NOTIFICATION;
3542 	/* not that we need this */
3543 	control->tail_mbuf = m_notify;
3544 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3545 	    control,
3546 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3547 }
3548 
3549 
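/*
 * Wraps a received ERROR chunk into an SCTP_REMOTE_ERROR notification.
 * The allocation is first attempted for the full chunk; if that fails,
 * it falls back to a header-only notification without the chunk data.
 */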
3550 static void
3551 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3552 {
3553 	struct mbuf *m_notify;
3554 	struct sctp_remote_error *sre;
3555 	struct sctp_queued_to_read *control;
3556 	unsigned int notif_len;
3557 	uint16_t chunk_len;
3558 
3559 	if ((stcb == NULL) ||
3560 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3561 		return;
3562 	}
3563 	if (chunk != NULL) {
3564 		chunk_len = ntohs(chunk->ch.chunk_length);
3565 	} else {
3566 		chunk_len = 0;
3567 	}
3568 	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
3569 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3570 	if (m_notify == NULL) {
3571 		/* Retry with smaller value. */
3572 		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
3573 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
3574 		if (m_notify == NULL) {
3575 			return;
3576 		}
3577 	}
3578 	SCTP_BUF_NEXT(m_notify) = NULL;
3579 	sre = mtod(m_notify, struct sctp_remote_error *);
3580 	memset(sre, 0, notif_len);
3581 	sre->sre_type = SCTP_REMOTE_ERROR;
3582 	sre->sre_flags = 0;
3583 	sre->sre_length = sizeof(struct sctp_remote_error);
3584 	sre->sre_error = error;
3585 	sre->sre_assoc_id = sctp_get_associd(stcb);
3586 	if (notif_len > sizeof(struct sctp_remote_error)) {
3587 		memcpy(sre->sre_data, chunk, chunk_len);
3588 		sre->sre_length += chunk_len;
3589 	}
3590 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3591 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3592 	    0, 0, stcb->asoc.context, 0, 0, 0,
3593 	    m_notify);
3594 	if (control != NULL) {
3595 		control->length = SCTP_BUF_LEN(m_notify);
3596 		control->spec_flags = M_NOTIFICATION;
3597 		/* not that we need this */
3598 		control->tail_mbuf = m_notify;
3599 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3600 		    control,
3601 		    &stcb->sctp_socket->so_rcv, 1,
3602 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3603 	} else {
3604 		sctp_m_freem(m_notify);
3605 	}
3606 }
3607 
3608 
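/*
 * Central dispatcher for upper-layer notifications: maps an
 * SCTP_NOTIFY_* code (plus an opaque 'data' argument whose type depends
 * on the code) to the specific notification builder above.  It bails out
 * early when the socket is gone, when the receive side has been shut
 * down, or for interface events while the association is still in the
 * COOKIE_WAIT/COOKIE_ECHOED front states.
 */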
3609 void
3610 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3611     uint32_t error, void *data, int so_locked
3612 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3613     SCTP_UNUSED
3614 #endif
3615 )
3616 {
3617 	if ((stcb == NULL) ||
3618 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3619 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3620 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3621 		/* If the socket is gone we are out of here */
3622 		return;
3623 	}
3624 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3625 		return;
3626 	}
3627 	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3628 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
3629 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3630 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3631 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3632 			/* Don't report these in front states */
3633 			return;
3634 		}
3635 	}
3636 	switch (notification) {
3637 	case SCTP_NOTIFY_ASSOC_UP:
3638 		if (stcb->asoc.assoc_up_sent == 0) {
3639 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3640 			stcb->asoc.assoc_up_sent = 1;
3641 		}
3642 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3643 			sctp_notify_adaptation_layer(stcb);
3644 		}
3645 		if (stcb->asoc.auth_supported == 0) {
3646 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3647 			    NULL, so_locked);
3648 		}
3649 		break;
3650 	case SCTP_NOTIFY_ASSOC_DOWN:
3651 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3652 		break;
3653 	case SCTP_NOTIFY_INTERFACE_DOWN:
3654 		{
3655 			struct sctp_nets *net;
3656 
3657 			net = (struct sctp_nets *)data;
3658 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3659 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3660 			break;
3661 		}
3662 	case SCTP_NOTIFY_INTERFACE_UP:
3663 		{
3664 			struct sctp_nets *net;
3665 
3666 			net = (struct sctp_nets *)data;
3667 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3668 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3669 			break;
3670 		}
3671 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3672 		{
3673 			struct sctp_nets *net;
3674 
3675 			net = (struct sctp_nets *)data;
3676 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3677 			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
3678 			break;
3679 		}
3680 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3681 		sctp_notify_send_failed2(stcb, error,
3682 		    (struct sctp_stream_queue_pending *)data, so_locked);
3683 		break;
3684 	case SCTP_NOTIFY_SENT_DG_FAIL:
3685 		sctp_notify_send_failed(stcb, 1, error,
3686 		    (struct sctp_tmit_chunk *)data, so_locked);
3687 		break;
3688 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3689 		sctp_notify_send_failed(stcb, 0, error,
3690 		    (struct sctp_tmit_chunk *)data, so_locked);
3691 		break;
3692 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3693 		{
3694 			uint32_t val;
3695 
3696 			val = *((uint32_t *)data);
3697 
3698 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3699 			break;
3700 		}
3701 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3702 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3703 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3704 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3705 		} else {
3706 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3707 		}
3708 		break;
3709 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3710 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3711 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
3712 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3713 		} else {
3714 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3715 		}
3716 		break;
3717 	case SCTP_NOTIFY_ASSOC_RESTART:
3718 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3719 		if (stcb->asoc.auth_supported == 0) {
3720 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3721 			    NULL, so_locked);
3722 		}
3723 		break;
3724 	case SCTP_NOTIFY_STR_RESET_SEND:
3725 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_OUTGOING_SSN);
3726 		break;
3727 	case SCTP_NOTIFY_STR_RESET_RECV:
3728 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data), SCTP_STREAM_RESET_INCOMING);
3729 		break;
3730 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3731 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3732 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3733 		break;
3734 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3735 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3736 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3737 		break;
3738 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3739 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3740 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3741 		break;
3742 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3743 		sctp_notify_stream_reset(stcb, error, ((uint16_t *)data),
3744 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3745 		break;
3746 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3747 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3748 		    error, so_locked);
3749 		break;
3750 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3751 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3752 		    error, so_locked);
3753 		break;
3754 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3755 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3756 		    error, so_locked);
3757 		break;
3758 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3759 		sctp_notify_shutdown_event(stcb);
3760 		break;
3761 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3762 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3763 		    (uint16_t)(uintptr_t)data,
3764 		    so_locked);
3765 		break;
3766 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3767 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3768 		    (uint16_t)(uintptr_t)data,
3769 		    so_locked);
3770 		break;
3771 	case SCTP_NOTIFY_NO_PEER_AUTH:
3772 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3773 		    (uint16_t)(uintptr_t)data,
3774 		    so_locked);
3775 		break;
3776 	case SCTP_NOTIFY_SENDER_DRY:
3777 		sctp_notify_sender_dry_event(stcb, so_locked);
3778 		break;
3779 	case SCTP_NOTIFY_REMOTE_ERROR:
3780 		sctp_notify_remote_error(stcb, error, data);
3781 		break;
3782 	default:
3783 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3784 		    __func__, notification, notification);
3785 		break;
3786 	}			/* end switch */
3787 }
3788 
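/*
 * Reports every chunk still queued for transmission as failed: walks the
 * sent queue, the send queue and each stream's output queue, notifying
 * the ULP (sent/unsent/stream-pending failure) and freeing the data.
 * The TCB send lock is taken here unless the caller indicates via
 * 'holds_lock' that it already owns it.
 */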
3789 void
3790 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
3791 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3792     SCTP_UNUSED
3793 #endif
3794 )
3795 {
3796 	struct sctp_association *asoc;
3797 	struct sctp_stream_out *outs;
3798 	struct sctp_tmit_chunk *chk, *nchk;
3799 	struct sctp_stream_queue_pending *sp, *nsp;
3800 	int i;
3801 
3802 	if (stcb == NULL) {
3803 		return;
3804 	}
3805 	asoc = &stcb->asoc;
3806 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3807 		/* already being freed */
3808 		return;
3809 	}
3810 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3811 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3812 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
3813 		return;
3814 	}
3815 	/* now go through all the gunk, freeing chunks */
3816 	if (holds_lock == 0) {
3817 		SCTP_TCB_SEND_LOCK(stcb);
3818 	}
3819 	/* sent queue SHOULD be empty */
3820 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
3821 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3822 		asoc->sent_queue_cnt--;
3823 		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
3824 			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
3825 				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
3826 #ifdef INVARIANTS
3827 			} else {
3828 				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
3829 #endif
3830 			}
3831 		}
3832 		if (chk->data != NULL) {
3833 			sctp_free_bufspace(stcb, asoc, chk, 1);
3834 			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
3835 			    error, chk, so_locked);
3836 			if (chk->data) {
3837 				sctp_m_freem(chk->data);
3838 				chk->data = NULL;
3839 			}
3840 		}
3841 		sctp_free_a_chunk(stcb, chk, so_locked);
3842 		/* sa_ignore FREED_MEMORY */
3843 	}
3844 	/* pending send queue SHOULD be empty */
3845 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
3846 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3847 		asoc->send_queue_cnt--;
3848 		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
3849 			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
3850 #ifdef INVARIANTS
3851 		} else {
3852 			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
3853 #endif
3854 		}
3855 		if (chk->data != NULL) {
3856 			sctp_free_bufspace(stcb, asoc, chk, 1);
3857 			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
3858 			    error, chk, so_locked);
3859 			if (chk->data) {
3860 				sctp_m_freem(chk->data);
3861 				chk->data = NULL;
3862 			}
3863 		}
3864 		sctp_free_a_chunk(stcb, chk, so_locked);
3865 		/* sa_ignore FREED_MEMORY */
3866 	}
3867 	for (i = 0; i < asoc->streamoutcnt; i++) {
3868 		/* For each stream */
3869 		outs = &asoc->strmout[i];
3870 		/* clean up any sends there */
3871 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
3872 			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
3873 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3874 			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, holds_lock);
3875 			sctp_free_spbufspace(stcb, asoc, sp);
3876 			if (sp->data) {
3877 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3878 				    error, (void *)sp, so_locked);
3879 				if (sp->data) {
3880 					sctp_m_freem(sp->data);
3881 					sp->data = NULL;
3882 					sp->tail_mbuf = NULL;
3883 					sp->length = 0;
3884 				}
3885 			}
3886 			if (sp->net) {
3887 				sctp_free_remote_addr(sp->net);
3888 				sp->net = NULL;
3889 			}
3890 			/* Free the chunk */
3891 			sctp_free_a_strmoq(stcb, sp, so_locked);
3892 			/* sa_ignore FREED_MEMORY */
3893 		}
3894 	}
3895 
3896 	if (holds_lock == 0) {
3897 		SCTP_TCB_SEND_UNLOCK(stcb);
3898 	}
3899 }
3900 
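/*
 * Notification side of an association abort: marks one-to-one style
 * sockets with SCTP_PCB_FLAGS_WAS_ABORTED, reports all outbound data as
 * failed, and then raises either the remotely- or locally-aborted
 * association change event, depending on 'from_peer'.
 */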
3901 void
3902 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3903     struct sctp_abort_chunk *abort, int so_locked
3904 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3905     SCTP_UNUSED
3906 #endif
3907 )
3908 {
3909 	if (stcb == NULL) {
3910 		return;
3911 	}
3912 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3913 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3914 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3915 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3916 	}
3917 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3918 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3919 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3920 		return;
3921 	}
3922 	/* Tell them we lost the asoc */
3923 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3924 	if (from_peer) {
3925 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3926 	} else {
3927 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3928 	}
3929 }
3930 
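/*
 * Responds to an incoming packet with an ABORT (using the peer's vtag
 * when a TCB exists).  If an association is present, the ULP is
 * notified, statistics are updated and the association is freed.
 */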
3931 void
3932 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3933     struct mbuf *m, int iphlen,
3934     struct sockaddr *src, struct sockaddr *dst,
3935     struct sctphdr *sh, struct mbuf *op_err,
3936     uint8_t mflowtype, uint32_t mflowid,
3937     uint32_t vrf_id, uint16_t port)
3938 {
3939 	uint32_t vtag;
3940 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3941 	struct socket *so;
3942 #endif
3943 
3944 	vtag = 0;
3945 	if (stcb != NULL) {
3946 		vtag = stcb->asoc.peer_vtag;
3947 		vrf_id = stcb->asoc.vrf_id;
3948 	}
3949 	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
3950 	    mflowtype, mflowid, inp->fibnum,
3951 	    vrf_id, port);
3952 	if (stcb != NULL) {
3953 		/* We have a TCB to abort, send notification too */
3954 		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
3955 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3956 		/* Ok, now lets free it */
3957 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3958 		so = SCTP_INP_SO(inp);
3959 		atomic_add_int(&stcb->asoc.refcnt, 1);
3960 		SCTP_TCB_UNLOCK(stcb);
3961 		SCTP_SOCKET_LOCK(so, 1);
3962 		SCTP_TCB_LOCK(stcb);
3963 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3964 #endif
3965 		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
3966 		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
3967 		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3968 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3969 		}
3970 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
3971 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
3972 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3973 		SCTP_SOCKET_UNLOCK(so, 1);
3974 #endif
3975 	}
3976 }
3977 #ifdef SCTP_ASOCLOG_OF_TSNS
3978 void
3979 sctp_print_out_track_log(struct sctp_tcb *stcb)
3980 {
3981 #ifdef NOSIY_PRINTS
3982 	int i;
3983 
3984 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
3985 	SCTP_PRINTF("IN bound TSN log-aaa\n");
3986 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
3987 		SCTP_PRINTF("None rcvd\n");
3988 		goto none_in;
3989 	}
3990 	if (stcb->asoc.tsn_in_wrapped) {
3991 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
3992 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
3993 			    stcb->asoc.in_tsnlog[i].tsn,
3994 			    stcb->asoc.in_tsnlog[i].strm,
3995 			    stcb->asoc.in_tsnlog[i].seq,
3996 			    stcb->asoc.in_tsnlog[i].flgs,
3997 			    stcb->asoc.in_tsnlog[i].sz);
3998 		}
3999 	}
4000 	if (stcb->asoc.tsn_in_at) {
4001 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
4002 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4003 			    stcb->asoc.in_tsnlog[i].tsn,
4004 			    stcb->asoc.in_tsnlog[i].strm,
4005 			    stcb->asoc.in_tsnlog[i].seq,
4006 			    stcb->asoc.in_tsnlog[i].flgs,
4007 			    stcb->asoc.in_tsnlog[i].sz);
4008 		}
4009 	}
4010 none_in:
4011 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
4012 	if ((stcb->asoc.tsn_out_at == 0) &&
4013 	    (stcb->asoc.tsn_out_wrapped == 0)) {
4014 		SCTP_PRINTF("None sent\n");
4015 	}
4016 	if (stcb->asoc.tsn_out_wrapped) {
4017 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
4018 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4019 			    stcb->asoc.out_tsnlog[i].tsn,
4020 			    stcb->asoc.out_tsnlog[i].strm,
4021 			    stcb->asoc.out_tsnlog[i].seq,
4022 			    stcb->asoc.out_tsnlog[i].flgs,
4023 			    stcb->asoc.out_tsnlog[i].sz);
4024 		}
4025 	}
4026 	if (stcb->asoc.tsn_out_at) {
4027 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
4028 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
4029 			    stcb->asoc.out_tsnlog[i].tsn,
4030 			    stcb->asoc.out_tsnlog[i].strm,
4031 			    stcb->asoc.out_tsnlog[i].seq,
4032 			    stcb->asoc.out_tsnlog[i].flgs,
4033 			    stcb->asoc.out_tsnlog[i].sz);
4034 		}
4035 	}
4036 #endif
4037 }
4038 #endif
4039 
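/*
 * Aborts an existing association from the local side: sends an ABORT
 * chunk (with the supplied operational error) to the peer, notifies the
 * ULP unless the socket is already gone, and frees the association.
 * With no TCB it can only clean up an endpoint that is pending
 * destruction.
 */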
4040 void
4041 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4042     struct mbuf *op_err,
4043     int so_locked
4044 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4045     SCTP_UNUSED
4046 #endif
4047 )
4048 {
4049 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4050 	struct socket *so;
4051 #endif
4052 
4053 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4054 	so = SCTP_INP_SO(inp);
4055 #endif
4056 	if (stcb == NULL) {
4057 		/* Got to have a TCB */
4058 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4059 			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4060 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4061 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
4062 			}
4063 		}
4064 		return;
4065 	} else {
4066 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
4067 	}
4068 	/* notify the peer */
4069 	sctp_send_abort_tcb(stcb, op_err, so_locked);
4070 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
4071 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
4072 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4073 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4074 	}
4075 	/* notify the ulp */
4076 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
4077 		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
4078 	}
4079 	/* now free the asoc */
4080 #ifdef SCTP_ASOCLOG_OF_TSNS
4081 	sctp_print_out_track_log(stcb);
4082 #endif
4083 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4084 	if (!so_locked) {
4085 		atomic_add_int(&stcb->asoc.refcnt, 1);
4086 		SCTP_TCB_UNLOCK(stcb);
4087 		SCTP_SOCKET_LOCK(so, 1);
4088 		SCTP_TCB_LOCK(stcb);
4089 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4090 	}
4091 #endif
4092 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
4093 	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
4094 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4095 	if (!so_locked) {
4096 		SCTP_SOCKET_UNLOCK(so, 1);
4097 	}
4098 #endif
4099 }
4100 
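/*
 * Handles an "out of the blue" packet (one with no matching
 * association).  The chunk list is scanned first: PACKET_DROPPED, ABORT
 * and SHUTDOWN_COMPLETE are silently ignored, SHUTDOWN_ACK is answered
 * with a SHUTDOWN_COMPLETE, and anything else falls through to an ABORT.
 * Whether that ABORT is actually sent depends on the sctp_blackhole
 * sysctl (presumably exposed as net.inet.sctp.blackhole): 0 always
 * replies, 1 stays silent only for packets containing an INIT, and
 * higher values never reply.
 */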
4101 void
4102 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4103     struct sockaddr *src, struct sockaddr *dst,
4104     struct sctphdr *sh, struct sctp_inpcb *inp,
4105     struct mbuf *cause,
4106     uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
4107     uint32_t vrf_id, uint16_t port)
4108 {
4109 	struct sctp_chunkhdr *ch, chunk_buf;
4110 	unsigned int chk_length;
4111 	int contains_init_chunk;
4112 
4113 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4114 	/* Generate a TO address for future reference */
4115 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4116 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
4117 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4118 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4119 		}
4120 	}
4121 	contains_init_chunk = 0;
4122 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4123 	    sizeof(*ch), (uint8_t *)&chunk_buf);
4124 	while (ch != NULL) {
4125 		chk_length = ntohs(ch->chunk_length);
4126 		if (chk_length < sizeof(*ch)) {
4127 			/* break to abort land */
4128 			break;
4129 		}
4130 		switch (ch->chunk_type) {
4131 		case SCTP_INIT:
4132 			contains_init_chunk = 1;
4133 			break;
4134 		case SCTP_PACKET_DROPPED:
4135 			/* we don't respond to pkt-dropped */
4136 			return;
4137 		case SCTP_ABORT_ASSOCIATION:
4138 			/* we don't respond with an ABORT to an ABORT */
4139 			return;
4140 		case SCTP_SHUTDOWN_COMPLETE:
4141 			/*
4142 			 * we ignore it since we are not waiting for it and
4143 			 * peer is gone
4144 			 */
4145 			return;
4146 		case SCTP_SHUTDOWN_ACK:
4147 			sctp_send_shutdown_complete2(src, dst, sh,
4148 			    mflowtype, mflowid, fibnum,
4149 			    vrf_id, port);
4150 			return;
4151 		default:
4152 			break;
4153 		}
4154 		offset += SCTP_SIZE32(chk_length);
4155 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4156 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4157 	}
4158 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4159 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4160 	    (contains_init_chunk == 0))) {
4161 		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
4162 		    mflowtype, mflowid, fibnum,
4163 		    vrf_id, port);
4164 	}
4165 }
4166 
4167 /*
4168  * check the inbound datagram to make sure there is not an abort inside it;
4169  * if there is, return 1, else return 0.
4170  */
4171 int
4172 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t *vtagfill)
4173 {
4174 	struct sctp_chunkhdr *ch;
4175 	struct sctp_init_chunk *init_chk, chunk_buf;
4176 	int offset;
4177 	unsigned int chk_length;
4178 
4179 	offset = iphlen + sizeof(struct sctphdr);
4180 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4181 	    (uint8_t *)&chunk_buf);
4182 	while (ch != NULL) {
4183 		chk_length = ntohs(ch->chunk_length);
4184 		if (chk_length < sizeof(*ch)) {
4185 			/* packet is probably corrupt */
4186 			break;
4187 		}
4188 		/* we seem to be ok, is it an abort? */
4189 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4190 			/* yep, tell them */
4191 			return (1);
4192 		}
4193 		if (ch->chunk_type == SCTP_INITIATION) {
4194 			/* need to update the Vtag */
4195 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4196 			    offset, sizeof(*init_chk), (uint8_t *)&chunk_buf);
4197 			if (init_chk != NULL) {
4198 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4199 			}
4200 		}
4201 		/* Nope, move to the next chunk */
4202 		offset += SCTP_SIZE32(chk_length);
4203 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4204 		    sizeof(*ch), (uint8_t *)&chunk_buf);
4205 	}
4206 	return (0);
4207 }
4208 
4209 /*
4210  * currently (2/02), ifa_addr embeds the scope_id and doesn't have sin6_scope_id
4211  * set (i.e. it's 0), so create this function to compare link-local scopes
4212  */
4213 #ifdef INET6
4214 uint32_t
4215 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4216 {
4217 	struct sockaddr_in6 a, b;
4218 
4219 	/* save copies */
4220 	a = *addr1;
4221 	b = *addr2;
4222 
4223 	if (a.sin6_scope_id == 0)
4224 		if (sa6_recoverscope(&a)) {
4225 			/* can't get scope, so can't match */
4226 			return (0);
4227 		}
4228 	if (b.sin6_scope_id == 0)
4229 		if (sa6_recoverscope(&b)) {
4230 			/* can't get scope, so can't match */
4231 			return (0);
4232 		}
4233 	if (a.sin6_scope_id != b.sin6_scope_id)
4234 		return (0);
4235 
4236 	return (1);
4237 }
4238 
4239 /*
4240  * returns a sockaddr_in6 with embedded scope recovered and removed
4241  */
4242 struct sockaddr_in6 *
4243 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4244 {
4245 	/* check and strip embedded scope junk */
4246 	if (addr->sin6_family == AF_INET6) {
4247 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4248 			if (addr->sin6_scope_id == 0) {
4249 				*store = *addr;
4250 				if (!sa6_recoverscope(store)) {
4251 					/* use the recovered scope */
4252 					addr = store;
4253 				}
4254 			} else {
4255 				/* else, return the original "to" addr */
4256 				in6_clearscope(&addr->sin6_addr);
4257 			}
4258 		}
4259 	}
4260 	return (addr);
4261 }
4262 #endif
4263 
4264 /*
4265  * are the two addresses the same?  currently a "scopeless" check; returns 1
4266  * if same, 0 if not
4267  */
4268 int
4269 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4270 {
4271 
4272 	/* must be valid */
4273 	if (sa1 == NULL || sa2 == NULL)
4274 		return (0);
4275 
4276 	/* must be the same family */
4277 	if (sa1->sa_family != sa2->sa_family)
4278 		return (0);
4279 
4280 	switch (sa1->sa_family) {
4281 #ifdef INET6
4282 	case AF_INET6:
4283 		{
4284 			/* IPv6 addresses */
4285 			struct sockaddr_in6 *sin6_1, *sin6_2;
4286 
4287 			sin6_1 = (struct sockaddr_in6 *)sa1;
4288 			sin6_2 = (struct sockaddr_in6 *)sa2;
4289 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4290 			    sin6_2));
4291 		}
4292 #endif
4293 #ifdef INET
4294 	case AF_INET:
4295 		{
4296 			/* IPv4 addresses */
4297 			struct sockaddr_in *sin_1, *sin_2;
4298 
4299 			sin_1 = (struct sockaddr_in *)sa1;
4300 			sin_2 = (struct sockaddr_in *)sa2;
4301 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4302 		}
4303 #endif
4304 	default:
4305 		/* we don't do these... */
4306 		return (0);
4307 	}
4308 }
4309 
4310 void
4311 sctp_print_address(struct sockaddr *sa)
4312 {
4313 #ifdef INET6
4314 	char ip6buf[INET6_ADDRSTRLEN];
4315 #endif
4316 
4317 	switch (sa->sa_family) {
4318 #ifdef INET6
4319 	case AF_INET6:
4320 		{
4321 			struct sockaddr_in6 *sin6;
4322 
4323 			sin6 = (struct sockaddr_in6 *)sa;
4324 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4325 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4326 			    ntohs(sin6->sin6_port),
4327 			    sin6->sin6_scope_id);
4328 			break;
4329 		}
4330 #endif
4331 #ifdef INET
4332 	case AF_INET:
4333 		{
4334 			struct sockaddr_in *sin;
4335 			unsigned char *p;
4336 
4337 			sin = (struct sockaddr_in *)sa;
4338 			p = (unsigned char *)&sin->sin_addr;
4339 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4340 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4341 			break;
4342 		}
4343 #endif
4344 	default:
4345 		SCTP_PRINTF("?\n");
4346 		break;
4347 	}
4348 }
4349 
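/*
 * Used by peeloff/accept: moves every queued-to-read entry that belongs
 * to 'stcb' from the old endpoint's read queue to the new one, and
 * re-accounts the mbuf space from the old socket's receive buffer to the
 * new one.  If the old socket buffer cannot be locked, the data is
 * simply left behind (see the comment in the body).
 */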
4350 void
4351 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4352     struct sctp_inpcb *new_inp,
4353     struct sctp_tcb *stcb,
4354     int waitflags)
4355 {
4356 	/*
4357 	 * go through our old INP and pull off any control structures that
4358 	 * belong to stcb and move them to the new inp.
4359 	 */
4360 	struct socket *old_so, *new_so;
4361 	struct sctp_queued_to_read *control, *nctl;
4362 	struct sctp_readhead tmp_queue;
4363 	struct mbuf *m;
4364 	int error = 0;
4365 
4366 	old_so = old_inp->sctp_socket;
4367 	new_so = new_inp->sctp_socket;
4368 	TAILQ_INIT(&tmp_queue);
4369 	error = sblock(&old_so->so_rcv, waitflags);
4370 	if (error) {
4371 		/*
4372 		 * Gak, can't get sblock, we have a problem. Data will be
4373 		 * left stranded, and we don't dare look at it since the
4374 		 * other thread may be reading something. Oh well, it's a
4375 		 * screwed up app that does a peeloff OR an accept while
4376 		 * reading from the main socket... actually it's only the
4377 		 * peeloff() case, since I think read will fail on a
4378 		 * listening socket.
4379 		 */
4380 		return;
4381 	}
4382 	/* lock the socket buffers */
4383 	SCTP_INP_READ_LOCK(old_inp);
4384 	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
4385 		/* Pull off all for our target stcb */
4386 		if (control->stcb == stcb) {
4387 			/* remove it we want it */
4388 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4389 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4390 			m = control->data;
4391 			while (m) {
4392 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4393 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4394 				}
4395 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4396 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4397 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4398 				}
4399 				m = SCTP_BUF_NEXT(m);
4400 			}
4401 		}
4402 	}
4403 	SCTP_INP_READ_UNLOCK(old_inp);
4404 	/* Remove the sb-lock on the old socket */
4405 
4406 	sbunlock(&old_so->so_rcv);
4407 	/* Now we move them over to the new socket buffer */
4408 	SCTP_INP_READ_LOCK(new_inp);
4409 	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
4410 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4411 		m = control->data;
4412 		while (m) {
4413 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4414 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4415 			}
4416 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4417 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4418 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4419 			}
4420 			m = SCTP_BUF_NEXT(m);
4421 		}
4422 	}
4423 	SCTP_INP_READ_UNLOCK(new_inp);
4424 }
4425 
4426 void
4427 sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
4428     struct sctp_tcb *stcb,
4429     int so_locked
4430 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4431     SCTP_UNUSED
4432 #endif
4433 )
4434 {
4435 	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
4436 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4437 		struct socket *so;
4438 
4439 		so = SCTP_INP_SO(inp);
4440 		if (!so_locked) {
4441 			if (stcb) {
4442 				atomic_add_int(&stcb->asoc.refcnt, 1);
4443 				SCTP_TCB_UNLOCK(stcb);
4444 			}
4445 			SCTP_SOCKET_LOCK(so, 1);
4446 			if (stcb) {
4447 				SCTP_TCB_LOCK(stcb);
4448 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4449 			}
4450 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4451 				SCTP_SOCKET_UNLOCK(so, 1);
4452 				return;
4453 			}
4454 		}
4455 #endif
4456 		sctp_sorwakeup(inp, inp->sctp_socket);
4457 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4458 		if (!so_locked) {
4459 			SCTP_SOCKET_UNLOCK(so, 1);
4460 		}
4461 #endif
4462 	}
4463 }
4464 
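/*
 * Queues a completed sctp_queued_to_read entry on the endpoint's read
 * queue: zero-length mbufs are trimmed, each remaining mbuf is charged
 * to the socket buffer, and the reader is woken up.  If the socket can
 * no longer be read from, the entry and its data are freed instead.
 */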
4465 void
4466 sctp_add_to_readq(struct sctp_inpcb *inp,
4467     struct sctp_tcb *stcb,
4468     struct sctp_queued_to_read *control,
4469     struct sockbuf *sb,
4470     int end,
4471     int inp_read_lock_held,
4472     int so_locked
4473 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4474     SCTP_UNUSED
4475 #endif
4476 )
4477 {
4478 	/*
4479 	 * Here we must place the control on the end of the socket read
4480 	 * queue AND increment sb_cc so that select will work properly on
4481 	 * read.
4482 	 */
4483 	struct mbuf *m, *prev = NULL;
4484 
4485 	if (inp == NULL) {
4486 		/* Gak, TSNH!! */
4487 #ifdef INVARIANTS
4488 		panic("Gak, inp NULL on add_to_readq");
4489 #endif
4490 		return;
4491 	}
4492 	if (inp_read_lock_held == 0)
4493 		SCTP_INP_READ_LOCK(inp);
4494 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
4495 		sctp_free_remote_addr(control->whoFrom);
4496 		if (control->data) {
4497 			sctp_m_freem(control->data);
4498 			control->data = NULL;
4499 		}
4500 		sctp_free_a_readq(stcb, control);
4501 		if (inp_read_lock_held == 0)
4502 			SCTP_INP_READ_UNLOCK(inp);
4503 		return;
4504 	}
4505 	if (!(control->spec_flags & M_NOTIFICATION)) {
4506 		atomic_add_int(&inp->total_recvs, 1);
4507 		if (!control->do_not_ref_stcb) {
4508 			atomic_add_int(&stcb->total_recvs, 1);
4509 		}
4510 	}
4511 	m = control->data;
4512 	control->held_length = 0;
4513 	control->length = 0;
4514 	while (m) {
4515 		if (SCTP_BUF_LEN(m) == 0) {
4516 			/* Skip mbufs with NO length */
4517 			if (prev == NULL) {
4518 				/* First one */
4519 				control->data = sctp_m_free(m);
4520 				m = control->data;
4521 			} else {
4522 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4523 				m = SCTP_BUF_NEXT(prev);
4524 			}
4525 			if (m == NULL) {
4526 				control->tail_mbuf = prev;
4527 			}
4528 			continue;
4529 		}
4530 		prev = m;
4531 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4532 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4533 		}
4534 		sctp_sballoc(stcb, sb, m);
4535 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4536 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4537 		}
4538 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4539 		m = SCTP_BUF_NEXT(m);
4540 	}
4541 	if (prev != NULL) {
4542 		control->tail_mbuf = prev;
4543 	} else {
4544 		/* Everything got collapsed out?? */
4545 		sctp_free_remote_addr(control->whoFrom);
4546 		sctp_free_a_readq(stcb, control);
4547 		if (inp_read_lock_held == 0)
4548 			SCTP_INP_READ_UNLOCK(inp);
4549 		return;
4550 	}
4551 	if (end) {
4552 		control->end_added = 1;
4553 	}
4554 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4555 	control->on_read_q = 1;
4556 	if (inp_read_lock_held == 0)
4557 		SCTP_INP_READ_UNLOCK(inp);
4558 	if (inp && inp->sctp_socket) {
4559 		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
4560 	}
4561 }
4562 
4563 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4564  *************ALTERNATE ROUTING CODE
4565  */
4566 
4567 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4568  *************ALTERNATE ROUTING CODE
4569  */
4570 
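/*
 * Allocates an mbuf holding a generic error cause TLV: a parameter
 * header with the given cause code followed by the 'info' string (not
 * NUL-terminated and not padded here).  Returns NULL on bad arguments,
 * an overly long info string, or mbuf shortage.  A hedged sketch of the
 * typical call pattern used elsewhere in the stack ('msg' is a local
 * char buffer):
 *
 *	snprintf(msg, sizeof(msg), "Some diagnostic text");
 *	op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
 *	(a NULL return simply means no cause gets attached)
 */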
4571 struct mbuf *
4572 sctp_generate_cause(uint16_t code, char *info)
4573 {
4574 	struct mbuf *m;
4575 	struct sctp_gen_error_cause *cause;
4576 	size_t info_len;
4577 	uint16_t len;
4578 
4579 	if ((code == 0) || (info == NULL)) {
4580 		return (NULL);
4581 	}
4582 	info_len = strlen(info);
4583 	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
4584 		return (NULL);
4585 	}
4586 	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
4587 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4588 	if (m != NULL) {
4589 		SCTP_BUF_LEN(m) = len;
4590 		cause = mtod(m, struct sctp_gen_error_cause *);
4591 		cause->code = htons(code);
4592 		cause->length = htons(len);
4593 		memcpy(cause->info, info, info_len);
4594 	}
4595 	return (m);
4596 }
4597 
4598 struct mbuf *
4599 sctp_generate_no_user_data_cause(uint32_t tsn)
4600 {
4601 	struct mbuf *m;
4602 	struct sctp_error_no_user_data *no_user_data_cause;
4603 	uint16_t len;
4604 
4605 	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
4606 	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4607 	if (m != NULL) {
4608 		SCTP_BUF_LEN(m) = len;
4609 		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4610 		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4611 		no_user_data_cause->cause.length = htons(len);
4612 		no_user_data_cause->tsn = htonl(tsn);
4613 	}
4614 	return (m);
4615 }
4616 
4617 #ifdef SCTP_MBCNT_LOGGING
4618 void
4619 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4620     struct sctp_tmit_chunk *tp1, int chk_cnt)
4621 {
4622 	if (tp1->data == NULL) {
4623 		return;
4624 	}
4625 	asoc->chunks_on_out_queue -= chk_cnt;
4626 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4627 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4628 		    asoc->total_output_queue_size,
4629 		    tp1->book_size,
4630 		    0,
4631 		    tp1->mbcnt);
4632 	}
4633 	if (asoc->total_output_queue_size >= tp1->book_size) {
4634 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4635 	} else {
4636 		asoc->total_output_queue_size = 0;
4637 	}
4638 
4639 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4640 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4641 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4642 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4643 		} else {
4644 			stcb->sctp_socket->so_snd.sb_cc = 0;
4645 
4646 		}
4647 	}
4648 }
4649 
4650 #endif
4651 
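/*
 * Abandons a (possibly fragmented) PR-SCTP message rooted at 'tp1':
 * updates the abandoned sent/unsent counters, notifies the ULP, marks
 * every fragment SCTP_FORWARD_TSN_SKIP across the sent and send queues,
 * and, if the tail of the message is still sitting on the stream output
 * queue, fabricates a chunk carrying the TSN of the would-be LAST
 * fragment so a FORWARD-TSN can cover it.  Returns the number of bytes
 * released.
 */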
4652 int
4653 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4654     uint8_t sent, int so_locked
4655 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4656     SCTP_UNUSED
4657 #endif
4658 )
4659 {
4660 	struct sctp_stream_out *strq;
4661 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4662 	struct sctp_stream_queue_pending *sp;
4663 	uint32_t mid;
4664 	uint16_t sid;
4665 	uint8_t foundeom = 0;
4666 	int ret_sz = 0;
4667 	int notdone;
4668 	int do_wakeup_routine = 0;
4669 
4670 	sid = tp1->rec.data.sid;
4671 	mid = tp1->rec.data.mid;
4672 	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4673 		stcb->asoc.abandoned_sent[0]++;
4674 		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4675 		stcb->asoc.strmout[sid].abandoned_sent[0]++;
4676 #if defined(SCTP_DETAILED_STR_STATS)
4677 		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
4678 #endif
4679 	} else {
4680 		stcb->asoc.abandoned_unsent[0]++;
4681 		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4682 		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
4683 #if defined(SCTP_DETAILED_STR_STATS)
4684 		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
4685 #endif
4686 	}
4687 	do {
4688 		ret_sz += tp1->book_size;
4689 		if (tp1->data != NULL) {
4690 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4691 				sctp_flight_size_decrease(tp1);
4692 				sctp_total_flight_decrease(stcb, tp1);
4693 			}
4694 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4695 			stcb->asoc.peers_rwnd += tp1->send_size;
4696 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4697 			if (sent) {
4698 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4699 			} else {
4700 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4701 			}
4702 			if (tp1->data) {
4703 				sctp_m_freem(tp1->data);
4704 				tp1->data = NULL;
4705 			}
4706 			do_wakeup_routine = 1;
4707 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4708 				stcb->asoc.sent_queue_cnt_removeable--;
4709 			}
4710 		}
4711 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4712 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4713 		    SCTP_DATA_NOT_FRAG) {
4714 			/* not frag'ed, we are done */
4715 			notdone = 0;
4716 			foundeom = 1;
4717 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4718 			/* end of frag, we are done */
4719 			notdone = 0;
4720 			foundeom = 1;
4721 		} else {
4722 			/*
4723 			 * Its a begin or middle piece, we must mark all of
4724 			 * it
4725 			 */
4726 			notdone = 1;
4727 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4728 		}
4729 	} while (tp1 && notdone);
4730 	if (foundeom == 0) {
4731 		/*
4732 		 * The multi-part message was scattered across the send and
4733 		 * sent queue.
4734 		 */
4735 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4736 			if ((tp1->rec.data.sid != sid) ||
4737 			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
4738 				break;
4739 			}
4740 			/*
4741 			 * save to chk in case we have some on stream out
4742 			 * queue. If so and we have an un-transmitted one we
4743 			 * don't have to fudge the TSN.
4744 			 */
4745 			chk = tp1;
4746 			ret_sz += tp1->book_size;
4747 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4748 			if (sent) {
4749 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4750 			} else {
4751 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4752 			}
4753 			if (tp1->data) {
4754 				sctp_m_freem(tp1->data);
4755 				tp1->data = NULL;
4756 			}
4757 			/* No flight involved here book the size to 0 */
4758 			tp1->book_size = 0;
4759 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4760 				foundeom = 1;
4761 			}
4762 			do_wakeup_routine = 1;
4763 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4764 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4765 			/*
4766 			 * on to the sent queue so we can wait for it to be
4767 			 * passed by.
4768 			 */
4769 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4770 			    sctp_next);
4771 			stcb->asoc.send_queue_cnt--;
4772 			stcb->asoc.sent_queue_cnt++;
4773 		}
4774 	}
4775 	if (foundeom == 0) {
4776 		/*
4777 		 * Still no eom found. That means there is stuff left on the
4778 		 * stream out queue.. yuck.
4779 		 */
4780 		SCTP_TCB_SEND_LOCK(stcb);
4781 		strq = &stcb->asoc.strmout[sid];
4782 		sp = TAILQ_FIRST(&strq->outqueue);
4783 		if (sp != NULL) {
4784 			sp->discard_rest = 1;
4785 			/*
4786 			 * We may need to put a chunk on the queue that
4787 			 * holds the TSN that would have been sent with the
4788 			 * LAST bit.
4789 			 */
4790 			if (chk == NULL) {
4791 				/* Yep, we have to */
4792 				sctp_alloc_a_chunk(stcb, chk);
4793 				if (chk == NULL) {
4794 					/*
4795 					 * we are hosed. All we can do is
4796 					 * nothing.. which will cause an
4797 					 * abort if the peer is paying
4798 					 * attention.
4799 					 */
4800 					goto oh_well;
4801 				}
4802 				memset(chk, 0, sizeof(*chk));
4803 				chk->rec.data.rcv_flags = 0;
4804 				chk->sent = SCTP_FORWARD_TSN_SKIP;
4805 				chk->asoc = &stcb->asoc;
4806 				if (stcb->asoc.idata_supported == 0) {
4807 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4808 						chk->rec.data.mid = 0;
4809 					} else {
4810 						chk->rec.data.mid = strq->next_mid_ordered;
4811 					}
4812 				} else {
4813 					if (sp->sinfo_flags & SCTP_UNORDERED) {
4814 						chk->rec.data.mid = strq->next_mid_unordered;
4815 					} else {
4816 						chk->rec.data.mid = strq->next_mid_ordered;
4817 					}
4818 				}
4819 				chk->rec.data.sid = sp->sid;
4820 				chk->rec.data.ppid = sp->ppid;
4821 				chk->rec.data.context = sp->context;
4822 				chk->flags = sp->act_flags;
4823 				chk->whoTo = NULL;
4824 				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4825 				strq->chunks_on_queues++;
4826 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4827 				stcb->asoc.sent_queue_cnt++;
4828 				stcb->asoc.pr_sctp_cnt++;
4829 			}
4830 			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4831 			if (sp->sinfo_flags & SCTP_UNORDERED) {
4832 				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
4833 			}
4834 			if (stcb->asoc.idata_supported == 0) {
4835 				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
4836 					strq->next_mid_ordered++;
4837 				}
4838 			} else {
4839 				if (sp->sinfo_flags & SCTP_UNORDERED) {
4840 					strq->next_mid_unordered++;
4841 				} else {
4842 					strq->next_mid_ordered++;
4843 				}
4844 			}
4845 	oh_well:
4846 			if (sp->data) {
4847 				/*
4848 				 * Pull any data to free up the SB and allow
4849 				 * sender to "add more" while we will throw
4850 				 * away :-)
4851 				 */
4852 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
4853 				ret_sz += sp->length;
4854 				do_wakeup_routine = 1;
4855 				sp->some_taken = 1;
4856 				sctp_m_freem(sp->data);
4857 				sp->data = NULL;
4858 				sp->tail_mbuf = NULL;
4859 				sp->length = 0;
4860 			}
4861 		}
4862 		SCTP_TCB_SEND_UNLOCK(stcb);
4863 	}
4864 	if (do_wakeup_routine) {
4865 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4866 		struct socket *so;
4867 
4868 		so = SCTP_INP_SO(stcb->sctp_ep);
4869 		if (!so_locked) {
4870 			atomic_add_int(&stcb->asoc.refcnt, 1);
4871 			SCTP_TCB_UNLOCK(stcb);
4872 			SCTP_SOCKET_LOCK(so, 1);
4873 			SCTP_TCB_LOCK(stcb);
4874 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4875 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4876 				/* assoc was freed while we were unlocked */
4877 				SCTP_SOCKET_UNLOCK(so, 1);
4878 				return (ret_sz);
4879 			}
4880 		}
4881 #endif
4882 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4883 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4884 		if (!so_locked) {
4885 			SCTP_SOCKET_UNLOCK(so, 1);
4886 		}
4887 #endif
4888 	}
4889 	return (ret_sz);
4890 }
4891 
4892 /*
4893  * checks to see if the given address, sa, is one that is currently known by
4894  * the kernel.  note: can't distinguish the same address on multiple interfaces
4895  * and doesn't handle multiple addresses with different zone/scope id's.
4896  * note: ifa_ifwithaddr() compares the entire sockaddr struct
4897  */
4898 struct sctp_ifa *
4899 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4900     int holds_lock)
4901 {
4902 	struct sctp_laddr *laddr;
4903 
4904 	if (holds_lock == 0) {
4905 		SCTP_INP_RLOCK(inp);
4906 	}
4907 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4908 		if (laddr->ifa == NULL)
4909 			continue;
4910 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4911 			continue;
4912 #ifdef INET
4913 		if (addr->sa_family == AF_INET) {
4914 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4915 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4916 				/* found him. */
4917 				if (holds_lock == 0) {
4918 					SCTP_INP_RUNLOCK(inp);
4919 				}
4920 				return (laddr->ifa);
4921 				break;
4922 			}
4923 		}
4924 #endif
4925 #ifdef INET6
4926 		if (addr->sa_family == AF_INET6) {
4927 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4928 			    &laddr->ifa->address.sin6)) {
4929 				/* found him. */
4930 				if (holds_lock == 0) {
4931 					SCTP_INP_RUNLOCK(inp);
4932 				}
4933 				return (laddr->ifa);
4934 				break;
4935 			}
4936 		}
4937 #endif
4938 	}
4939 	if (holds_lock == 0) {
4940 		SCTP_INP_RUNLOCK(inp);
4941 	}
4942 	return (NULL);
4943 }
4944 
4945 uint32_t
4946 sctp_get_ifa_hash_val(struct sockaddr *addr)
4947 {
4948 	switch (addr->sa_family) {
4949 #ifdef INET
4950 	case AF_INET:
4951 		{
4952 			struct sockaddr_in *sin;
4953 
4954 			sin = (struct sockaddr_in *)addr;
4955 			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4956 		}
4957 #endif
4958 #ifdef INET6
4959 	case AF_INET6:
4960 		{
4961 			struct sockaddr_in6 *sin6;
4962 			uint32_t hash_of_addr;
4963 
4964 			sin6 = (struct sockaddr_in6 *)addr;
4965 			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4966 			    sin6->sin6_addr.s6_addr32[1] +
4967 			    sin6->sin6_addr.s6_addr32[2] +
4968 			    sin6->sin6_addr.s6_addr32[3]);
4969 			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4970 			return (hash_of_addr);
4971 		}
4972 #endif
4973 	default:
4974 		break;
4975 	}
4976 	return (0);
4977 }
4978 
4979 struct sctp_ifa *
4980 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4981 {
4982 	struct sctp_ifa *sctp_ifap;
4983 	struct sctp_vrf *vrf;
4984 	struct sctp_ifalist *hash_head;
4985 	uint32_t hash_of_addr;
4986 
4987 	if (holds_lock == 0)
4988 		SCTP_IPI_ADDR_RLOCK();
4989 
4990 	vrf = sctp_find_vrf(vrf_id);
4991 	if (vrf == NULL) {
4992 		if (holds_lock == 0)
4993 			SCTP_IPI_ADDR_RUNLOCK();
4994 		return (NULL);
4995 	}
4996 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4997 
4998 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4999 	if (hash_head == NULL) {
5000 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5001 		    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
5002 		    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
5003 		sctp_print_address(addr);
5004 		SCTP_PRINTF("No such bucket for address\n");
5005 		if (holds_lock == 0)
5006 			SCTP_IPI_ADDR_RUNLOCK();
5007 
5008 		return (NULL);
5009 	}
5010 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5011 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5012 			continue;
5013 #ifdef INET
5014 		if (addr->sa_family == AF_INET) {
5015 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5016 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5017 				/* found him. */
5018 				if (holds_lock == 0)
5019 					SCTP_IPI_ADDR_RUNLOCK();
5020 				return (sctp_ifap);
5021 				break;
5022 			}
5023 		}
5024 #endif
5025 #ifdef INET6
5026 		if (addr->sa_family == AF_INET6) {
5027 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5028 			    &sctp_ifap->address.sin6)) {
5029 				/* found him. */
5030 				if (holds_lock == 0)
5031 					SCTP_IPI_ADDR_RUNLOCK();
5032 				return (sctp_ifap);
5033 				break;
5034 			}
5035 		}
5036 #endif
5037 	}
5038 	if (holds_lock == 0)
5039 		SCTP_IPI_ADDR_RUNLOCK();
5040 	return (NULL);
5041 }
5042 
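/*
 * Called as the application consumes data from the receive buffer.  If
 * the receive window has grown by at least 'rwnd_req' since the last
 * report, a window-update SACK is sent and output is kicked; otherwise
 * the freed amount is merely accumulated for a later check.
 */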
5043 static void
5044 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
5045     uint32_t rwnd_req)
5046 {
5047 	/* User pulled some data, do we need a rwnd update? */
5048 	int r_unlocked = 0;
5049 	uint32_t dif, rwnd;
5050 	struct socket *so = NULL;
5051 
5052 	if (stcb == NULL)
5053 		return;
5054 
5055 	atomic_add_int(&stcb->asoc.refcnt, 1);
5056 
5057 	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
5058 	    SCTP_STATE_SHUTDOWN_RECEIVED |
5059 	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
5060 		/* Pre-check: if we are freeing, no update */
5061 		goto no_lock;
5062 	}
5063 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5064 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5065 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5066 		goto out;
5067 	}
5068 	so = stcb->sctp_socket;
5069 	if (so == NULL) {
5070 		goto out;
5071 	}
5072 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5073 	/* Have you freed enough to look? */
5074 	*freed_so_far = 0;
5075 	/* Yep, it's worth a look and the lock overhead */
5076 
5077 	/* Figure out what the rwnd would be */
5078 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5079 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5080 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5081 	} else {
5082 		dif = 0;
5083 	}
5084 	if (dif >= rwnd_req) {
5085 		if (hold_rlock) {
5086 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5087 			r_unlocked = 1;
5088 		}
5089 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5090 			/*
5091 			 * One last check before we allow the guy possibly
5092 			 * One last check before we allow the guy possibly
5093 			 * to get in. There is a race where the guy has not
5094 			 * reached the gate yet; in that case, skip the update.
5095 			goto out;
5096 		}
5097 		SCTP_TCB_LOCK(stcb);
5098 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5099 			/* No reports here */
5100 			SCTP_TCB_UNLOCK(stcb);
5101 			goto out;
5102 		}
5103 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5104 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
5105 
5106 		sctp_chunk_output(stcb->sctp_ep, stcb,
5107 		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5108 		/* make sure no timer is running */
5109 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
5110 		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5111 		SCTP_TCB_UNLOCK(stcb);
5112 	} else {
5113 		/* Update how much we have pending */
5114 		stcb->freed_by_sorcv_sincelast = dif;
5115 	}
5116 out:
5117 	if (so && r_unlocked && hold_rlock) {
5118 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5119 	}
5120 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5121 no_lock:
5122 	atomic_add_int(&stcb->asoc.refcnt, -1);
5123 	return;
5124 }
5125 
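/*
 * The protocol-specific guts of soreceive() for SCTP: pulls
 * sctp_queued_to_read entries off the endpoint's read queue, copies the
 * data to userland via 'uio' (or hands back mbufs via 'mp'), fills in
 * 'sinfo' and 'from' when requested, handles blocking vs. non-blocking
 * semantics and MSG_PEEK, and calls sctp_user_rcvd() to trigger window
 * updates as data is consumed.
 */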
5126 int
5127 sctp_sorecvmsg(struct socket *so,
5128     struct uio *uio,
5129     struct mbuf **mp,
5130     struct sockaddr *from,
5131     int fromlen,
5132     int *msg_flags,
5133     struct sctp_sndrcvinfo *sinfo,
5134     int filling_sinfo)
5135 {
5136 	/*
5137 	 * MSG flags we will look at: MSG_DONTWAIT - non-blocking IO.
5138 	 * MSG_PEEK - Look, don't touch :-D (only valid with OUT mbuf copy,
5139 	 * i.e. mp=NULL, thus uio is the copy method to userland). MSG_WAITALL - ??
5140 	 * On the way out we may send out any combination of:
5141 	 * MSG_NOTIFICATION MSG_EOR
5142 	 *
5143 	 */
5144 	struct sctp_inpcb *inp = NULL;
5145 	int my_len = 0;
5146 	int cp_len = 0, error = 0;
5147 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5148 	struct mbuf *m = NULL;
5149 	struct sctp_tcb *stcb = NULL;
5150 	int wakeup_read_socket = 0;
5151 	int freecnt_applied = 0;
5152 	int out_flags = 0, in_flags = 0;
5153 	int block_allowed = 1;
5154 	uint32_t freed_so_far = 0;
5155 	uint32_t copied_so_far = 0;
5156 	int in_eeor_mode = 0;
5157 	int no_rcv_needed = 0;
5158 	uint32_t rwnd_req = 0;
5159 	int hold_sblock = 0;
5160 	int hold_rlock = 0;
5161 	ssize_t slen = 0;
5162 	uint32_t held_length = 0;
5163 	int sockbuf_lock = 0;
5164 
5165 	if (uio == NULL) {
5166 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5167 		return (EINVAL);
5168 	}
5169 	if (msg_flags) {
5170 		in_flags = *msg_flags;
5171 		if (in_flags & MSG_PEEK)
5172 			SCTP_STAT_INCR(sctps_read_peeks);
5173 	} else {
5174 		in_flags = 0;
5175 	}
5176 	slen = uio->uio_resid;
5177 
5178 	/* Pull in and set up our int flags */
5179 	if (in_flags & MSG_OOB) {
5180 		/* Out of band's NOT supported */
5181 		return (EOPNOTSUPP);
5182 	}
5183 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5184 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5185 		return (EINVAL);
5186 	}
5187 	if ((in_flags & (MSG_DONTWAIT
5188 	    | MSG_NBIO
5189 	    )) ||
5190 	    SCTP_SO_IS_NBIO(so)) {
5191 		block_allowed = 0;
5192 	}
5193 	/* setup the endpoint */
5194 	inp = (struct sctp_inpcb *)so->so_pcb;
5195 	if (inp == NULL) {
5196 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5197 		return (EFAULT);
5198 	}
5199 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5200 	/* Must be at least a MTU's worth */
5201 	if (rwnd_req < SCTP_MIN_RWND)
5202 		rwnd_req = SCTP_MIN_RWND;
5203 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5204 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5205 		sctp_misc_ints(SCTP_SORECV_ENTER,
5206 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5207 	}
5208 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5209 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5210 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
5211 	}
5212 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5213 	if (error) {
5214 		goto release_unlocked;
5215 	}
5216 	sockbuf_lock = 1;
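	/*
	 * restart/restart_nosblocks: re-take the socket buffer lock and
	 * re-check for a dying endpoint, EOF, socket errors, or too little
	 * queued data.  If blocking is allowed we sbwait() and loop,
	 * otherwise we fail with EWOULDBLOCK.
	 */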
5217 restart:
5218 
5219 
5220 restart_nosblocks:
5221 	if (hold_sblock == 0) {
5222 		SOCKBUF_LOCK(&so->so_rcv);
5223 		hold_sblock = 1;
5224 	}
5225 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5226 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5227 		goto out;
5228 	}
5229 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5230 		if (so->so_error) {
5231 			error = so->so_error;
5232 			if ((in_flags & MSG_PEEK) == 0)
5233 				so->so_error = 0;
5234 			goto out;
5235 		} else {
5236 			if (so->so_rcv.sb_cc == 0) {
5237 				/* indicate EOF */
5238 				error = 0;
5239 				goto out;
5240 			}
5241 		}
5242 	}
5243 	if (so->so_rcv.sb_cc <= held_length) {
5244 		if (so->so_error) {
5245 			error = so->so_error;
5246 			if ((in_flags & MSG_PEEK) == 0) {
5247 				so->so_error = 0;
5248 			}
5249 			goto out;
5250 		}
5251 		if ((so->so_rcv.sb_cc == 0) &&
5252 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5253 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5254 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5255 				/*
5256 				 * For the active open side clear the flags
5257 				 * for re-use; the passive open side is
5258 				 * blocked by connect.
5259 				 */
5260 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5261 					/*
5262 					 * You were aborted, passive side
5263 					 * always hits here
5264 					 */
5265 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5266 					error = ECONNRESET;
5267 				}
5268 				so->so_state &= ~(SS_ISCONNECTING |
5269 				    SS_ISDISCONNECTING |
5270 				    SS_ISCONFIRMING |
5271 				    SS_ISCONNECTED);
5272 				if (error == 0) {
5273 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5274 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5275 						error = ENOTCONN;
5276 					}
5277 				}
5278 				goto out;
5279 			}
5280 		}
5281 		if (block_allowed) {
5282 			error = sbwait(&so->so_rcv);
5283 			if (error) {
5284 				goto out;
5285 			}
5286 			held_length = 0;
5287 			goto restart_nosblocks;
5288 		} else {
5289 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5290 			error = EWOULDBLOCK;
5291 			goto out;
5292 		}
5293 	}
5294 	if (hold_sblock == 1) {
5295 		SOCKBUF_UNLOCK(&so->so_rcv);
5296 		hold_sblock = 0;
5297 	}
5298 	/* we possibly have data we can read */
5299 	/* sa_ignore FREED_MEMORY */
5300 	control = TAILQ_FIRST(&inp->read_queue);
5301 	if (control == NULL) {
5302 		/*
5303 		 * This could happen if the appender did the increment
5304 		 * but has not yet done the tailq insert onto the
5305 		 * read_queue.
5306 		 */
5307 		if (hold_rlock == 0) {
5308 			SCTP_INP_READ_LOCK(inp);
5309 		}
5310 		control = TAILQ_FIRST(&inp->read_queue);
5311 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5312 #ifdef INVARIANTS
5313 			panic("Huh, its non zero and nothing on control?");
5314 #endif
5315 			so->so_rcv.sb_cc = 0;
5316 		}
5317 		SCTP_INP_READ_UNLOCK(inp);
5318 		hold_rlock = 0;
5319 		goto restart;
5320 	}
5321 	if ((control->length == 0) &&
5322 	    (control->do_not_ref_stcb)) {
5323 		/*
5324 		 * Clean-up code for freeing an assoc that left behind a
5325 		 * pdapi... maybe a peer in EEOR mode that just closed
5326 		 * after sending and never indicated an EOR.
5327 		 */
5328 		if (hold_rlock == 0) {
5329 			hold_rlock = 1;
5330 			SCTP_INP_READ_LOCK(inp);
5331 		}
5332 		control->held_length = 0;
5333 		if (control->data) {
5334 			/* Hmm there is data here .. fix */
5335 			struct mbuf *m_tmp;
5336 			int cnt = 0;
5337 
5338 			m_tmp = control->data;
5339 			while (m_tmp) {
5340 				cnt += SCTP_BUF_LEN(m_tmp);
5341 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5342 					control->tail_mbuf = m_tmp;
5343 					control->end_added = 1;
5344 				}
5345 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5346 			}
5347 			control->length = cnt;
5348 		} else {
5349 			/* remove it */
5350 			TAILQ_REMOVE(&inp->read_queue, control, next);
5351 			/* Add back any hidden data */
5352 			sctp_free_remote_addr(control->whoFrom);
5353 			sctp_free_a_readq(stcb, control);
5354 		}
5355 		if (hold_rlock) {
5356 			hold_rlock = 0;
5357 			SCTP_INP_READ_UNLOCK(inp);
5358 		}
5359 		goto restart;
5360 	}
5361 	if ((control->length == 0) &&
5362 	    (control->end_added == 1)) {
5363 		/*
5364 		 * Do we also need to check for (control->pdapi_aborted ==
5365 		 * 1)?
5366 		 */
5367 		if (hold_rlock == 0) {
5368 			hold_rlock = 1;
5369 			SCTP_INP_READ_LOCK(inp);
5370 		}
5371 		TAILQ_REMOVE(&inp->read_queue, control, next);
5372 		if (control->data) {
5373 #ifdef INVARIANTS
5374 			panic("control->data not null but control->length == 0");
5375 #else
5376 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5377 			sctp_m_freem(control->data);
5378 			control->data = NULL;
5379 #endif
5380 		}
5381 		if (control->aux_data) {
5382 			sctp_m_free(control->aux_data);
5383 			control->aux_data = NULL;
5384 		}
5385 #ifdef INVARIANTS
5386 		if (control->on_strm_q) {
5387 			panic("About to free ctl:%p so:%p and its in %d",
5388 			    control, so, control->on_strm_q);
5389 		}
5390 #endif
5391 		sctp_free_remote_addr(control->whoFrom);
5392 		sctp_free_a_readq(stcb, control);
5393 		if (hold_rlock) {
5394 			hold_rlock = 0;
5395 			SCTP_INP_READ_UNLOCK(inp);
5396 		}
5397 		goto restart;
5398 	}
5399 	if (control->length == 0) {
5400 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5401 		    (filling_sinfo)) {
5402 			/* find a more suitable one than this */
5403 			ctl = TAILQ_NEXT(control, next);
5404 			while (ctl) {
5405 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5406 				    (ctl->some_taken ||
5407 				    (ctl->spec_flags & M_NOTIFICATION) ||
5408 				    ((ctl->do_not_ref_stcb == 0) &&
5409 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5410 				    ) {
5411 					/*-
5412 					 * If we have a different TCB next and there is data
5413 					 * present, and either we have already taken some (pdapi)
5414 					 * OR we can ref the tcb and no delivery has started on
5415 					 * this stream, we take it. Note we allow a notification
5416 					 * on a different assoc to be delivered.
5417 					 */
5418 					control = ctl;
5419 					goto found_one;
5420 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5421 					    (ctl->length) &&
5422 					    ((ctl->some_taken) ||
5423 					    ((ctl->do_not_ref_stcb == 0) &&
5424 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5425 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5426 					/*-
5427 					 * If we have the same tcb, there is data present, and we
5428 					 * have the stream interleave feature on, then if we have
5429 					 * taken some (pdapi) or we can refer to that tcb AND we
5430 					 * have not started a delivery for this stream, we can
5431 					 * take it. Note we do NOT allow a notification on the
5432 					 * same assoc to be delivered.
5433 					 */
5434 					control = ctl;
5435 					goto found_one;
5436 				}
5437 				ctl = TAILQ_NEXT(ctl, next);
5438 			}
5439 		}
5440 		/*
5441 		 * If we reach here, no suitable replacement is available
5442 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5443 		 * into our held count, and it's time to sleep again.
5444 		 */
5445 		held_length = so->so_rcv.sb_cc;
5446 		control->held_length = so->so_rcv.sb_cc;
5447 		goto restart;
5448 	}
5449 	/* Clear the held length since there is something to read */
5450 	control->held_length = 0;
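	/*
	 * found_one: control points at an entry that has data queued.  Take
	 * the read lock, mark the entry as partially taken and, when the
	 * entry allows it, grab a reference on the stcb so the association
	 * cannot be freed out from under the copy loop below.
	 */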
5451 found_one:
5452 	/*
5453 	 * If we reach here, control has some data for us to read off.
5454 	 * Note that stcb COULD be NULL.
5455 	 */
5456 	if (hold_rlock == 0) {
5457 		hold_rlock = 1;
5458 		SCTP_INP_READ_LOCK(inp);
5459 	}
5460 	control->some_taken++;
5461 	stcb = control->stcb;
5462 	if (stcb) {
5463 		if ((control->do_not_ref_stcb == 0) &&
5464 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5465 			if (freecnt_applied == 0)
5466 				stcb = NULL;
5467 		} else if (control->do_not_ref_stcb == 0) {
5468 			/* you can't free it on me please */
5469 			/*
5470 			 * The lock on the socket buffer protects us so the
5471 			 * free code will stop. But since we used the
5472 			 * socketbuf lock and the sender uses the tcb_lock
5473 			 * to increment, we need to use the atomic add to
5474 			 * the refcnt
5475 			 */
5476 			if (freecnt_applied) {
5477 #ifdef INVARIANTS
5478 				panic("refcnt already incremented");
5479 #else
5480 				SCTP_PRINTF("refcnt already incremented?\n");
5481 #endif
5482 			} else {
5483 				atomic_add_int(&stcb->asoc.refcnt, 1);
5484 				freecnt_applied = 1;
5485 			}
5486 			/*
5487 			 * Setup to remember how much we have not yet told
5488 			 * the peer our rwnd has opened up. Note we grab the
5489 			 * value from the tcb from last time. Note too that
5490 			 * sack sending clears this when a sack is sent,
5491 			 * which is fine. Once we hit the rwnd_req, we then
5492 			 * will go to the sctp_user_rcvd() that will not
5493 			 * lock until it KNOWS it MUST send a WUP-SACK.
5494 			 */
5495 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5496 			stcb->freed_by_sorcv_sincelast = 0;
5497 		}
5498 	}
5499 	if (stcb &&
5500 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5501 	    control->do_not_ref_stcb == 0) {
5502 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5503 	}
5504 	/* First let's get the sinfo and sockaddr info off */
5505 	if ((sinfo != NULL) && (filling_sinfo != 0)) {
5506 		sinfo->sinfo_stream = control->sinfo_stream;
5507 		sinfo->sinfo_ssn = (uint16_t)control->mid;
5508 		sinfo->sinfo_flags = control->sinfo_flags;
5509 		sinfo->sinfo_ppid = control->sinfo_ppid;
5510 		sinfo->sinfo_context = control->sinfo_context;
5511 		sinfo->sinfo_timetolive = control->sinfo_timetolive;
5512 		sinfo->sinfo_tsn = control->sinfo_tsn;
5513 		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
5514 		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
5515 		nxt = TAILQ_NEXT(control, next);
5516 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5517 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5518 			struct sctp_extrcvinfo *s_extra;
5519 
5520 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5521 			if ((nxt) &&
5522 			    (nxt->length)) {
5523 				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5524 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5525 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5526 				}
5527 				if (nxt->spec_flags & M_NOTIFICATION) {
5528 					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5529 				}
5530 				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
5531 				s_extra->serinfo_next_length = nxt->length;
5532 				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
5533 				s_extra->serinfo_next_stream = nxt->sinfo_stream;
5534 				if (nxt->tail_mbuf != NULL) {
5535 					if (nxt->end_added) {
5536 						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5537 					}
5538 				}
5539 			} else {
5540 				/*
5541 				 * We explicitly zero these, since the memcpy
5542 				 * picked up other things beyond the older
5543 				 * sinfo_ fields that are on the control
5544 				 * structure.
5545 				 */
5546 				nxt = NULL;
5547 				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
5548 				s_extra->serinfo_next_aid = 0;
5549 				s_extra->serinfo_next_length = 0;
5550 				s_extra->serinfo_next_ppid = 0;
5551 				s_extra->serinfo_next_stream = 0;
5552 			}
5553 		}
5554 		/*
5555 		 * update off the real current cum-ack, if we have an stcb.
5556 		 */
5557 		if ((control->do_not_ref_stcb == 0) && stcb)
5558 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5559 		/*
5560 		 * mask off the high bits, we keep the actual chunk bits in
5561 		 * there.
5562 		 */
5563 		sinfo->sinfo_flags &= 0x00ff;
5564 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5565 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5566 		}
5567 	}
5568 #ifdef SCTP_ASOCLOG_OF_TSNS
5569 	{
5570 		int index, newindex;
5571 		struct sctp_pcbtsn_rlog *entry;
5572 
5573 		do {
5574 			index = inp->readlog_index;
5575 			newindex = index + 1;
5576 			if (newindex >= SCTP_READ_LOG_SIZE) {
5577 				newindex = 0;
5578 			}
5579 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5580 		entry = &inp->readlog[index];
5581 		entry->vtag = control->sinfo_assoc_id;
5582 		entry->strm = control->sinfo_stream;
5583 		entry->seq = (uint16_t)control->mid;
5584 		entry->sz = control->length;
5585 		entry->flgs = control->sinfo_flags;
5586 	}
5587 #endif
5588 	if ((fromlen > 0) && (from != NULL)) {
5589 		union sctp_sockstore store;
5590 		size_t len;
5591 
5592 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5593 #ifdef INET6
5594 		case AF_INET6:
5595 			len = sizeof(struct sockaddr_in6);
5596 			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5597 			store.sin6.sin6_port = control->port_from;
5598 			break;
5599 #endif
5600 #ifdef INET
5601 		case AF_INET:
5602 #ifdef INET6
5603 			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5604 				len = sizeof(struct sockaddr_in6);
5605 				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5606 				    &store.sin6);
5607 				store.sin6.sin6_port = control->port_from;
5608 			} else {
5609 				len = sizeof(struct sockaddr_in);
5610 				store.sin = control->whoFrom->ro._l_addr.sin;
5611 				store.sin.sin_port = control->port_from;
5612 			}
5613 #else
5614 			len = sizeof(struct sockaddr_in);
5615 			store.sin = control->whoFrom->ro._l_addr.sin;
5616 			store.sin.sin_port = control->port_from;
5617 #endif
5618 			break;
5619 #endif
5620 		default:
5621 			len = 0;
5622 			break;
5623 		}
5624 		memcpy(from, &store, min((size_t)fromlen, len));
5625 #ifdef INET6
5626 		{
5627 			struct sockaddr_in6 lsa6, *from6;
5628 
5629 			from6 = (struct sockaddr_in6 *)from;
5630 			sctp_recover_scope_mac(from6, (&lsa6));
5631 		}
5632 #endif
5633 	}
5634 	if (hold_rlock) {
5635 		SCTP_INP_READ_UNLOCK(inp);
5636 		hold_rlock = 0;
5637 	}
5638 	if (hold_sblock) {
5639 		SOCKBUF_UNLOCK(&so->so_rcv);
5640 		hold_sblock = 0;
5641 	}
5642 	/* now copy out what data we can */
5643 	if (mp == NULL) {
5644 		/* copy out each mbuf in the chain up to length */
5645 get_more_data:
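		/*
		 * Copy loop: move at most uio_resid bytes out of each mbuf
		 * with uiomove().  Unless MSG_PEEK is set, consumed data is
		 * removed from the socket buffer and freed, and once
		 * freed_so_far crosses rwnd_req we nudge the peer with a
		 * window update via sctp_user_rcvd().
		 */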
5646 		m = control->data;
5647 		while (m) {
5648 			/* Move out all we can */
5649 			cp_len = (int)uio->uio_resid;
5650 			my_len = (int)SCTP_BUF_LEN(m);
5651 			if (cp_len > my_len) {
5652 				/* not enough in this buf */
5653 				cp_len = my_len;
5654 			}
5655 			if (hold_rlock) {
5656 				SCTP_INP_READ_UNLOCK(inp);
5657 				hold_rlock = 0;
5658 			}
5659 			if (cp_len > 0)
5660 				error = uiomove(mtod(m, char *), cp_len, uio);
5661 			/* re-read */
5662 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5663 				goto release;
5664 			}
5665 			if ((control->do_not_ref_stcb == 0) && stcb &&
5666 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5667 				no_rcv_needed = 1;
5668 			}
5669 			if (error) {
5670 				/* error we are out of here */
5671 				goto release;
5672 			}
5673 			SCTP_INP_READ_LOCK(inp);
5674 			hold_rlock = 1;
5675 			if (cp_len == SCTP_BUF_LEN(m)) {
5676 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5677 				    (control->end_added)) {
5678 					out_flags |= MSG_EOR;
5679 					if ((control->do_not_ref_stcb == 0) &&
5680 					    (control->stcb != NULL) &&
5681 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5682 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5683 				}
5684 				if (control->spec_flags & M_NOTIFICATION) {
5685 					out_flags |= MSG_NOTIFICATION;
5686 				}
5687 				/* we ate up the mbuf */
5688 				if (in_flags & MSG_PEEK) {
5689 					/* just looking */
5690 					m = SCTP_BUF_NEXT(m);
5691 					copied_so_far += cp_len;
5692 				} else {
5693 					/* dispose of the mbuf */
5694 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5695 						sctp_sblog(&so->so_rcv,
5696 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5697 					}
5698 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5699 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5700 						sctp_sblog(&so->so_rcv,
5701 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5702 					}
5703 					copied_so_far += cp_len;
5704 					freed_so_far += cp_len;
5705 					freed_so_far += MSIZE;
5706 					atomic_subtract_int(&control->length, cp_len);
5707 					control->data = sctp_m_free(m);
5708 					m = control->data;
5709 					/*
5710 					 * Been through it all; we must hold the
5711 					 * sb lock, so it is ok to null the tail.
5712 					 */
5713 					if (control->data == NULL) {
5714 #ifdef INVARIANTS
5715 						if ((control->end_added == 0) ||
5716 						    (TAILQ_NEXT(control, next) == NULL)) {
5717 							/*
5718 							 * If the end is not
5719 							 * added, OR the next
5720 							 * entry is NULL, we
5721 							 * MUST have the
5722 							 * lock.
5723 							 */
5724 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5725 								panic("Hmm we don't own the lock?");
5726 							}
5727 						}
5728 #endif
5729 						control->tail_mbuf = NULL;
5730 #ifdef INVARIANTS
5731 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5732 							panic("end_added, nothing left and no MSG_EOR");
5733 						}
5734 #endif
5735 					}
5736 				}
5737 			} else {
5738 				/* Do we need to trim the mbuf? */
5739 				if (control->spec_flags & M_NOTIFICATION) {
5740 					out_flags |= MSG_NOTIFICATION;
5741 				}
5742 				if ((in_flags & MSG_PEEK) == 0) {
5743 					SCTP_BUF_RESV_UF(m, cp_len);
5744 					SCTP_BUF_LEN(m) -= cp_len;
5745 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5746 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5747 					}
5748 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5749 					if ((control->do_not_ref_stcb == 0) &&
5750 					    stcb) {
5751 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5752 					}
5753 					copied_so_far += cp_len;
5754 					freed_so_far += cp_len;
5755 					freed_so_far += MSIZE;
5756 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5757 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5758 						    SCTP_LOG_SBRESULT, 0);
5759 					}
5760 					atomic_subtract_int(&control->length, cp_len);
5761 				} else {
5762 					copied_so_far += cp_len;
5763 				}
5764 			}
5765 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5766 				break;
5767 			}
5768 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5769 			    (control->do_not_ref_stcb == 0) &&
5770 			    (freed_so_far >= rwnd_req)) {
5771 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5772 			}
5773 		}		/* end while(m) */
5774 		/*
5775 		 * At this point we have looked at it all and we either have
5776 		 * a MSG_EOR/or read all the user wants... <OR>
5777 		 * control->length == 0.
5778 		 */
5779 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5780 			/* we are done with this control */
5781 			if (control->length == 0) {
5782 				if (control->data) {
5783 #ifdef INVARIANTS
5784 					panic("control->data not null at read eor?");
5785 #else
5786 					SCTP_PRINTF("Strange, data left in the control buffer .. invariants would panic?\n");
5787 					sctp_m_freem(control->data);
5788 					control->data = NULL;
5789 #endif
5790 				}
5791 		done_with_control:
5792 				if (hold_rlock == 0) {
5793 					SCTP_INP_READ_LOCK(inp);
5794 					hold_rlock = 1;
5795 				}
5796 				TAILQ_REMOVE(&inp->read_queue, control, next);
5797 				/* Add back any hidden data */
5798 				if (control->held_length) {
5799 					held_length = 0;
5800 					control->held_length = 0;
5801 					wakeup_read_socket = 1;
5802 				}
5803 				if (control->aux_data) {
5804 					sctp_m_free(control->aux_data);
5805 					control->aux_data = NULL;
5806 				}
5807 				no_rcv_needed = control->do_not_ref_stcb;
5808 				sctp_free_remote_addr(control->whoFrom);
5809 				control->data = NULL;
5810 #ifdef INVARIANTS
5811 				if (control->on_strm_q) {
5812 					panic("About to free ctl:%p so:%p and its in %d",
5813 					    control, so, control->on_strm_q);
5814 				}
5815 #endif
5816 				sctp_free_a_readq(stcb, control);
5817 				control = NULL;
5818 				if ((freed_so_far >= rwnd_req) &&
5819 				    (no_rcv_needed == 0))
5820 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5821 
5822 			} else {
5823 				/*
5824 				 * The user did not read all of this
5825 				 * message, turn off the returned MSG_EOR
5826 				 * since we are leaving more behind on the
5827 				 * control to read.
5828 				 */
5829 #ifdef INVARIANTS
5830 				if (control->end_added &&
5831 				    (control->data == NULL) &&
5832 				    (control->tail_mbuf == NULL)) {
5833 					panic("Gak, control->length is corrupt?");
5834 				}
5835 #endif
5836 				no_rcv_needed = control->do_not_ref_stcb;
5837 				out_flags &= ~MSG_EOR;
5838 			}
5839 		}
5840 		if (out_flags & MSG_EOR) {
5841 			goto release;
5842 		}
5843 		if ((uio->uio_resid == 0) ||
5844 		    ((in_eeor_mode) &&
5845 		    (copied_so_far >= (uint32_t)max(so->so_rcv.sb_lowat, 1)))) {
5846 			goto release;
5847 		}
5848 		/*
5849 		 * If I hit here, the receiver wants more and this message is
5850 		 * NOT done (pd-api). So two questions: Can we block? If not,
5851 		 * we are done. Did the user NOT set MSG_WAITALL?
5852 		 */
5853 		if (block_allowed == 0) {
5854 			goto release;
5855 		}
5856 		/*
5857 		 * We need to wait for more data. A few things: - We don't
5858 		 * sbunlock() so we don't get someone else reading. - We
5859 		 * must be sure to account for the case where what is added
5860 		 * is NOT to our control when we wake up.
5861 		 */
5862 
5863 		/*
5864 		 * Do we need to tell the transport a rwnd update might be
5865 		 * needed before we go to sleep?
5866 		 */
5867 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5868 		    ((freed_so_far >= rwnd_req) &&
5869 		    (control->do_not_ref_stcb == 0) &&
5870 		    (no_rcv_needed == 0))) {
5871 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5872 		}
5873 wait_some_more:
5874 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5875 			goto release;
5876 		}
5877 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5878 			goto release;
5879 
5880 		if (hold_rlock == 1) {
5881 			SCTP_INP_READ_UNLOCK(inp);
5882 			hold_rlock = 0;
5883 		}
5884 		if (hold_sblock == 0) {
5885 			SOCKBUF_LOCK(&so->so_rcv);
5886 			hold_sblock = 1;
5887 		}
5888 		if ((copied_so_far) && (control->length == 0) &&
5889 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5890 			goto release;
5891 		}
5892 		if (so->so_rcv.sb_cc <= control->held_length) {
5893 			error = sbwait(&so->so_rcv);
5894 			if (error) {
5895 				goto release;
5896 			}
5897 			control->held_length = 0;
5898 		}
5899 		if (hold_sblock) {
5900 			SOCKBUF_UNLOCK(&so->so_rcv);
5901 			hold_sblock = 0;
5902 		}
5903 		if (control->length == 0) {
5904 			/* still nothing here */
5905 			if (control->end_added == 1) {
5906 				/* The peer aborted or is done, i.e. did a shutdown */
5907 				out_flags |= MSG_EOR;
5908 				if (control->pdapi_aborted) {
5909 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5910 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5911 
5912 					out_flags |= MSG_TRUNC;
5913 				} else {
5914 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5915 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5916 				}
5917 				goto done_with_control;
5918 			}
5919 			if (so->so_rcv.sb_cc > held_length) {
5920 				control->held_length = so->so_rcv.sb_cc;
5921 				held_length = 0;
5922 			}
5923 			goto wait_some_more;
5924 		} else if (control->data == NULL) {
5925 			/*
5926 			 * we must re-sync since data is probably being
5927 			 * added
5928 			 */
5929 			SCTP_INP_READ_LOCK(inp);
5930 			if ((control->length > 0) && (control->data == NULL)) {
5931 				/*
5932 				 * Big trouble... we have the lock and it's
5933 				 * corrupt?
5934 				 */
5935 #ifdef INVARIANTS
5936 				panic("Impossible data==NULL length !=0");
5937 #endif
5938 				out_flags |= MSG_EOR;
5939 				out_flags |= MSG_TRUNC;
5940 				control->length = 0;
5941 				SCTP_INP_READ_UNLOCK(inp);
5942 				goto done_with_control;
5943 			}
5944 			SCTP_INP_READ_UNLOCK(inp);
5945 			/* We will fall around to get more data */
5946 		}
5947 		goto get_more_data;
5948 	} else {
5949 		/*-
5950 		 * Give caller back the mbuf chain,
5951 		 * store in uio_resid the length
5952 		 */
5953 		wakeup_read_socket = 0;
5954 		if ((control->end_added == 0) ||
5955 		    (TAILQ_NEXT(control, next) == NULL)) {
5956 			/* Need to get rlock */
5957 			if (hold_rlock == 0) {
5958 				SCTP_INP_READ_LOCK(inp);
5959 				hold_rlock = 1;
5960 			}
5961 		}
5962 		if (control->end_added) {
5963 			out_flags |= MSG_EOR;
5964 			if ((control->do_not_ref_stcb == 0) &&
5965 			    (control->stcb != NULL) &&
5966 			    ((control->spec_flags & M_NOTIFICATION) == 0))
5967 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5968 		}
5969 		if (control->spec_flags & M_NOTIFICATION) {
5970 			out_flags |= MSG_NOTIFICATION;
5971 		}
5972 		uio->uio_resid = control->length;
5973 		*mp = control->data;
5974 		m = control->data;
5975 		while (m) {
5976 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5977 				sctp_sblog(&so->so_rcv,
5978 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5979 			}
5980 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5981 			freed_so_far += SCTP_BUF_LEN(m);
5982 			freed_so_far += MSIZE;
5983 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5984 				sctp_sblog(&so->so_rcv,
5985 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5986 			}
5987 			m = SCTP_BUF_NEXT(m);
5988 		}
5989 		control->data = control->tail_mbuf = NULL;
5990 		control->length = 0;
5991 		if (out_flags & MSG_EOR) {
5992 			/* Done with this control */
5993 			goto done_with_control;
5994 		}
5995 	}
5996 release:
5997 	if (hold_rlock == 1) {
5998 		SCTP_INP_READ_UNLOCK(inp);
5999 		hold_rlock = 0;
6000 	}
6001 	if (hold_sblock == 1) {
6002 		SOCKBUF_UNLOCK(&so->so_rcv);
6003 		hold_sblock = 0;
6004 	}
6005 	sbunlock(&so->so_rcv);
6006 	sockbuf_lock = 0;
6007 
6008 release_unlocked:
6009 	if (hold_sblock) {
6010 		SOCKBUF_UNLOCK(&so->so_rcv);
6011 		hold_sblock = 0;
6012 	}
6013 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6014 		if ((freed_so_far >= rwnd_req) &&
6015 		    (control && (control->do_not_ref_stcb == 0)) &&
6016 		    (no_rcv_needed == 0))
6017 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6018 	}
6019 out:
6020 	if (msg_flags) {
6021 		*msg_flags = out_flags;
6022 	}
6023 	if (((out_flags & MSG_EOR) == 0) &&
6024 	    ((in_flags & MSG_PEEK) == 0) &&
6025 	    (sinfo) &&
6026 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6027 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6028 		struct sctp_extrcvinfo *s_extra;
6029 
6030 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6031 		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
6032 	}
6033 	if (hold_rlock == 1) {
6034 		SCTP_INP_READ_UNLOCK(inp);
6035 	}
6036 	if (hold_sblock) {
6037 		SOCKBUF_UNLOCK(&so->so_rcv);
6038 	}
6039 	if (sockbuf_lock) {
6040 		sbunlock(&so->so_rcv);
6041 	}
6042 	if (freecnt_applied) {
6043 		/*
6044 		 * The lock on the socket buffer protects us so the free
6045 		 * code will stop. But since we used the socketbuf lock and
6046 		 * the sender uses the tcb_lock to increment, we need to use
6047 		 * the atomic add to the refcnt.
6048 		 */
6049 		if (stcb == NULL) {
6050 #ifdef INVARIANTS
6051 			panic("stcb for refcnt has gone NULL?");
6052 			goto stage_left;
6053 #else
6054 			goto stage_left;
6055 #endif
6056 		}
6057 		/* Save the value back for next time */
6058 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6059 		atomic_add_int(&stcb->asoc.refcnt, -1);
6060 	}
6061 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6062 		if (stcb) {
6063 			sctp_misc_ints(SCTP_SORECV_DONE,
6064 			    freed_so_far,
6065 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6066 			    stcb->asoc.my_rwnd,
6067 			    so->so_rcv.sb_cc);
6068 		} else {
6069 			sctp_misc_ints(SCTP_SORECV_DONE,
6070 			    freed_so_far,
6071 			    (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
6072 			    0,
6073 			    so->so_rcv.sb_cc);
6074 		}
6075 	}
6076 stage_left:
6077 	if (wakeup_read_socket) {
6078 		sctp_sorwakeup(inp, so);
6079 	}
6080 	return (error);
6081 }
6082 
6083 
6084 #ifdef SCTP_MBUF_LOGGING
6085 struct mbuf *
6086 sctp_m_free(struct mbuf *m)
6087 {
6088 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6089 		sctp_log_mb(m, SCTP_MBUF_IFREE);
6090 	}
6091 	return (m_free(m));
6092 }
6093 
6094 void
6095 sctp_m_freem(struct mbuf *mb)
6096 {
6097 	while (mb != NULL)
6098 		mb = sctp_m_free(mb);
6099 }
6100 
6101 #endif
6102 
6103 int
6104 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6105 {
6106 	/*
6107 	 * Given a local address. For all associations that hold the
6108 	 * address, request a peer-set-primary.
6109 	 */
6110 	struct sctp_ifa *ifa;
6111 	struct sctp_laddr *wi;
6112 
6113 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6114 	if (ifa == NULL) {
6115 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6116 		return (EADDRNOTAVAIL);
6117 	}
6118 	/*
6119 	 * Now that we have the ifa we must awaken the iterator with this
6120 	 * message.
6121 	 */
6122 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6123 	if (wi == NULL) {
6124 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6125 		return (ENOMEM);
6126 	}
6127 	/* Now incr the count and init the wi structure */
6128 	SCTP_INCR_LADDR_COUNT();
6129 	memset(wi, 0, sizeof(*wi));
6130 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6131 	wi->ifa = ifa;
6132 	wi->action = SCTP_SET_PRIM_ADDR;
6133 	atomic_add_int(&ifa->refcount, 1);
6134 
6135 	/* Now add it to the work queue */
6136 	SCTP_WQ_ADDR_LOCK();
6137 	/*
6138 	 * Should this really be a tailq? As it is we will process the
6139 	 * newest first :-0
6140 	 */
6141 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6142 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6143 	    (struct sctp_inpcb *)NULL,
6144 	    (struct sctp_tcb *)NULL,
6145 	    (struct sctp_nets *)NULL);
6146 	SCTP_WQ_ADDR_UNLOCK();
6147 	return (0);
6148 }
6149 
6150 
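/*
 * soreceive() handler for SCTP sockets: translate the generic arguments
 * into a sctp_sorecvmsg() call, then convert the returned sctp_extrcvinfo
 * into ancillary data via sctp_build_ctl_nchunk() and duplicate the peer
 * address into *psa for the caller.
 */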
6151 int
6152 sctp_soreceive(struct socket *so,
6153     struct sockaddr **psa,
6154     struct uio *uio,
6155     struct mbuf **mp0,
6156     struct mbuf **controlp,
6157     int *flagsp)
6158 {
6159 	int error, fromlen;
6160 	uint8_t sockbuf[256];
6161 	struct sockaddr *from;
6162 	struct sctp_extrcvinfo sinfo;
6163 	int filling_sinfo = 1;
6164 	struct sctp_inpcb *inp;
6165 
6166 	inp = (struct sctp_inpcb *)so->so_pcb;
6167 	/* pickup the assoc we are reading from */
6168 	if (inp == NULL) {
6169 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6170 		return (EINVAL);
6171 	}
6172 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6173 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6174 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6175 	    (controlp == NULL)) {
6176 		/* user does not want the sndrcv ctl */
6177 		filling_sinfo = 0;
6178 	}
6179 	if (psa) {
6180 		from = (struct sockaddr *)sockbuf;
6181 		fromlen = sizeof(sockbuf);
6182 		from->sa_len = 0;
6183 	} else {
6184 		from = NULL;
6185 		fromlen = 0;
6186 	}
6187 
6188 	if (filling_sinfo) {
6189 		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
6190 	}
6191 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6192 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6193 	if (controlp != NULL) {
6194 		/* copy back the sinfo in a CMSG format */
6195 		if (filling_sinfo)
6196 			*controlp = sctp_build_ctl_nchunk(inp,
6197 			    (struct sctp_sndrcvinfo *)&sinfo);
6198 		else
6199 			*controlp = NULL;
6200 	}
6201 	if (psa) {
6202 		/* copy back the address info */
6203 		if (from && from->sa_len) {
6204 			*psa = sodupsockaddr(from, M_NOWAIT);
6205 		} else {
6206 			*psa = NULL;
6207 		}
6208 	}
6209 	return (error);
6210 }
6211 
6212 
6213 
6214 
6215 
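/*
 * Add the addresses from a packed sctp_connectx() style list to an
 * existing stcb.  Each sockaddr is sanity checked (no wildcard, broadcast
 * or multicast destinations) and handed to sctp_add_remote_addr(); on any
 * failure the association is freed and *error is set.  Returns the number
 * of addresses actually added.
 */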
6216 int
6217 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6218     int totaddr, int *error)
6219 {
6220 	int added = 0;
6221 	int i;
6222 	struct sctp_inpcb *inp;
6223 	struct sockaddr *sa;
6224 	size_t incr = 0;
6225 #ifdef INET
6226 	struct sockaddr_in *sin;
6227 #endif
6228 #ifdef INET6
6229 	struct sockaddr_in6 *sin6;
6230 #endif
6231 
6232 	sa = addr;
6233 	inp = stcb->sctp_ep;
6234 	*error = 0;
6235 	for (i = 0; i < totaddr; i++) {
6236 		switch (sa->sa_family) {
6237 #ifdef INET
6238 		case AF_INET:
6239 			incr = sizeof(struct sockaddr_in);
6240 			sin = (struct sockaddr_in *)sa;
6241 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
6242 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
6243 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
6244 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6245 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6246 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
6247 				*error = EINVAL;
6248 				goto out_now;
6249 			}
6250 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6251 			    SCTP_DONOT_SETSCOPE,
6252 			    SCTP_ADDR_IS_CONFIRMED)) {
6253 				/* assoc gone no un-lock */
6254 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6255 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6256 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
6257 				*error = ENOBUFS;
6258 				goto out_now;
6259 			}
6260 			added++;
6261 			break;
6262 #endif
6263 #ifdef INET6
6264 		case AF_INET6:
6265 			incr = sizeof(struct sockaddr_in6);
6266 			sin6 = (struct sockaddr_in6 *)sa;
6267 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
6268 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
6269 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6270 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6271 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
6272 				*error = EINVAL;
6273 				goto out_now;
6274 			}
6275 			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
6276 			    SCTP_DONOT_SETSCOPE,
6277 			    SCTP_ADDR_IS_CONFIRMED)) {
6278 				/* assoc gone no un-lock */
6279 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6280 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
6281 				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
6282 				*error = ENOBUFS;
6283 				goto out_now;
6284 			}
6285 			added++;
6286 			break;
6287 #endif
6288 		default:
6289 			break;
6290 		}
6291 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6292 	}
6293 out_now:
6294 	return (added);
6295 }
6296 
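/*
 * Walk a packed address list for sctp_connectx(): validate the sa_len of
 * each entry, reject v4-mapped IPv6 addresses, count the IPv4/IPv6
 * addresses, and stop early if the buffer limit would be exceeded.  If any
 * address already maps to an association on this endpoint, that stcb is
 * returned; otherwise NULL.
 */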
6297 struct sctp_tcb *
6298 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6299     unsigned int *totaddr,
6300     unsigned int *num_v4, unsigned int *num_v6, int *error,
6301     unsigned int limit, int *bad_addr)
6302 {
6303 	struct sockaddr *sa;
6304 	struct sctp_tcb *stcb = NULL;
6305 	unsigned int incr, at, i;
6306 
6307 	at = 0;
6308 	sa = addr;
6309 	*error = *num_v6 = *num_v4 = 0;
6310 	/* account and validate addresses */
6311 	for (i = 0; i < *totaddr; i++) {
6312 		switch (sa->sa_family) {
6313 #ifdef INET
6314 		case AF_INET:
6315 			incr = (unsigned int)sizeof(struct sockaddr_in);
6316 			if (sa->sa_len != incr) {
6317 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6318 				*error = EINVAL;
6319 				*bad_addr = 1;
6320 				return (NULL);
6321 			}
6322 			(*num_v4) += 1;
6323 			break;
6324 #endif
6325 #ifdef INET6
6326 		case AF_INET6:
6327 			{
6328 				struct sockaddr_in6 *sin6;
6329 
6330 				sin6 = (struct sockaddr_in6 *)sa;
6331 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6332 					/* Must be non-mapped for connectx */
6333 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6334 					*error = EINVAL;
6335 					*bad_addr = 1;
6336 					return (NULL);
6337 				}
6338 				incr = (unsigned int)sizeof(struct sockaddr_in6);
6339 				if (sa->sa_len != incr) {
6340 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6341 					*error = EINVAL;
6342 					*bad_addr = 1;
6343 					return (NULL);
6344 				}
6345 				(*num_v6) += 1;
6346 				break;
6347 			}
6348 #endif
6349 		default:
6350 			*totaddr = i;
6351 			incr = 0;
6352 			/* we are done */
6353 			break;
6354 		}
6355 		if (i == *totaddr) {
6356 			break;
6357 		}
6358 		SCTP_INP_INCR_REF(inp);
6359 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6360 		if (stcb != NULL) {
6361 			/* Already have or am bringing up an association */
6362 			return (stcb);
6363 		} else {
6364 			SCTP_INP_DECR_REF(inp);
6365 		}
6366 		if ((at + incr) > limit) {
6367 			*totaddr = i;
6368 			break;
6369 		}
6370 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6371 	}
6372 	return ((struct sctp_tcb *)NULL);
6373 }
6374 
6375 /*
6376  * sctp_bindx(ADD) for one address.
6377  * assumes all arguments are valid/checked by caller.
6378  */
6379 void
6380 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6381     struct sockaddr *sa, sctp_assoc_t assoc_id,
6382     uint32_t vrf_id, int *error, void *p)
6383 {
6384 	struct sockaddr *addr_touse;
6385 #if defined(INET) && defined(INET6)
6386 	struct sockaddr_in sin;
6387 #endif
6388 
6389 	/* see if we're bound all already! */
6390 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6391 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6392 		*error = EINVAL;
6393 		return;
6394 	}
6395 	addr_touse = sa;
6396 #ifdef INET6
6397 	if (sa->sa_family == AF_INET6) {
6398 #ifdef INET
6399 		struct sockaddr_in6 *sin6;
6400 
6401 #endif
6402 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6403 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6404 			*error = EINVAL;
6405 			return;
6406 		}
6407 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6408 			/* can only bind v6 on PF_INET6 sockets */
6409 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6410 			*error = EINVAL;
6411 			return;
6412 		}
6413 #ifdef INET
6414 		sin6 = (struct sockaddr_in6 *)addr_touse;
6415 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6416 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6417 			    SCTP_IPV6_V6ONLY(inp)) {
6418 				/* can't bind v4-mapped on PF_INET sockets */
6419 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6420 				*error = EINVAL;
6421 				return;
6422 			}
6423 			in6_sin6_2_sin(&sin, sin6);
6424 			addr_touse = (struct sockaddr *)&sin;
6425 		}
6426 #endif
6427 	}
6428 #endif
6429 #ifdef INET
6430 	if (sa->sa_family == AF_INET) {
6431 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6432 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6433 			*error = EINVAL;
6434 			return;
6435 		}
6436 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6437 		    SCTP_IPV6_V6ONLY(inp)) {
6438 			/* can't bind v4 on PF_INET sockets */
6439 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6440 			*error = EINVAL;
6441 			return;
6442 		}
6443 	}
6444 #endif
6445 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6446 		if (p == NULL) {
6447 			/* Can't get proc for Net/Open BSD */
6448 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6449 			*error = EINVAL;
6450 			return;
6451 		}
6452 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6453 		return;
6454 	}
6455 	/*
6456 	 * No locks required here since bind and mgmt_ep_sa all do their own
6457 	 * locking. If we do something for the FIX: below we may need to
6458 	 * lock in that case.
6459 	 */
6460 	if (assoc_id == 0) {
6461 		/* add the address */
6462 		struct sctp_inpcb *lep;
6463 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6464 
6465 		/* validate the incoming port */
6466 		if ((lsin->sin_port != 0) &&
6467 		    (lsin->sin_port != inp->sctp_lport)) {
6468 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6469 			*error = EINVAL;
6470 			return;
6471 		} else {
6472 			/* user specified 0 port, set it to existing port */
6473 			lsin->sin_port = inp->sctp_lport;
6474 		}
6475 
6476 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6477 		if (lep != NULL) {
6478 			/*
6479 			 * We must decrement the refcount since we have the
6480 			 * ep already and are binding. No remove going on
6481 			 * here.
6482 			 */
6483 			SCTP_INP_DECR_REF(lep);
6484 		}
6485 		if (lep == inp) {
6486 			/* already bound to it.. ok */
6487 			return;
6488 		} else if (lep == NULL) {
6489 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6490 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6491 			    SCTP_ADD_IP_ADDRESS,
6492 			    vrf_id, NULL);
6493 		} else {
6494 			*error = EADDRINUSE;
6495 		}
6496 		if (*error)
6497 			return;
6498 	} else {
6499 		/*
6500 		 * FIX: decide whether we allow assoc based bindx
6501 		 */
6502 	}
6503 }
6504 
6505 /*
6506  * sctp_bindx(DELETE) for one address.
6507  * assumes all arguments are valid/checked by caller.
6508  */
6509 void
6510 sctp_bindx_delete_address(struct sctp_inpcb *inp,
6511     struct sockaddr *sa, sctp_assoc_t assoc_id,
6512     uint32_t vrf_id, int *error)
6513 {
6514 	struct sockaddr *addr_touse;
6515 #if defined(INET) && defined(INET6)
6516 	struct sockaddr_in sin;
6517 #endif
6518 
6519 	/* see if we're bound all already! */
6520 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6521 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6522 		*error = EINVAL;
6523 		return;
6524 	}
6525 	addr_touse = sa;
6526 #ifdef INET6
6527 	if (sa->sa_family == AF_INET6) {
6528 #ifdef INET
6529 		struct sockaddr_in6 *sin6;
6530 #endif
6531 
6532 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6533 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6534 			*error = EINVAL;
6535 			return;
6536 		}
6537 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6538 			/* can only bind v6 on PF_INET6 sockets */
6539 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6540 			*error = EINVAL;
6541 			return;
6542 		}
6543 #ifdef INET
6544 		sin6 = (struct sockaddr_in6 *)addr_touse;
6545 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6546 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6547 			    SCTP_IPV6_V6ONLY(inp)) {
6548 				/* can't bind mapped-v4 on PF_INET sockets */
6549 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6550 				*error = EINVAL;
6551 				return;
6552 			}
6553 			in6_sin6_2_sin(&sin, sin6);
6554 			addr_touse = (struct sockaddr *)&sin;
6555 		}
6556 #endif
6557 	}
6558 #endif
6559 #ifdef INET
6560 	if (sa->sa_family == AF_INET) {
6561 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6562 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6563 			*error = EINVAL;
6564 			return;
6565 		}
6566 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6567 		    SCTP_IPV6_V6ONLY(inp)) {
6568 			/* can't bind v4 on PF_INET sockets */
6569 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6570 			*error = EINVAL;
6571 			return;
6572 		}
6573 	}
6574 #endif
6575 	/*
6576 	 * No lock required; mgmt_ep_sa does its own locking. If the FIX:
6577 	 * below is ever changed we may need to lock before calling
6578 	 * association level binding.
6579 	 */
6580 	if (assoc_id == 0) {
6581 		/* delete the address */
6582 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6583 		    SCTP_DEL_IP_ADDRESS,
6584 		    vrf_id, NULL);
6585 	} else {
6586 		/*
6587 		 * FIX: decide whether we allow assoc based bindx
6588 		 */
6589 	}
6590 }
6591 
6592 /*
6593  * returns the valid local address count for an assoc, taking into account
6594  * all scoping rules
6595  */
6596 int
6597 sctp_local_addr_count(struct sctp_tcb *stcb)
6598 {
6599 	int loopback_scope;
6600 #if defined(INET)
6601 	int ipv4_local_scope, ipv4_addr_legal;
6602 #endif
6603 #if defined (INET6)
6604 	int local_scope, site_scope, ipv6_addr_legal;
6605 #endif
6606 	struct sctp_vrf *vrf;
6607 	struct sctp_ifn *sctp_ifn;
6608 	struct sctp_ifa *sctp_ifa;
6609 	int count = 0;
6610 
6611 	/* Turn on all the appropriate scopes */
6612 	loopback_scope = stcb->asoc.scope.loopback_scope;
6613 #if defined(INET)
6614 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
6615 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
6616 #endif
6617 #if defined(INET6)
6618 	local_scope = stcb->asoc.scope.local_scope;
6619 	site_scope = stcb->asoc.scope.site_scope;
6620 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
6621 #endif
6622 	SCTP_IPI_ADDR_RLOCK();
6623 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6624 	if (vrf == NULL) {
6625 		/* no vrf, no addresses */
6626 		SCTP_IPI_ADDR_RUNLOCK();
6627 		return (0);
6628 	}
6629 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6630 		/*
6631 		 * bound all case: go through all ifns on the vrf
6632 		 */
6633 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6634 			if ((loopback_scope == 0) &&
6635 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6636 				continue;
6637 			}
6638 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6639 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6640 					continue;
6641 				switch (sctp_ifa->address.sa.sa_family) {
6642 #ifdef INET
6643 				case AF_INET:
6644 					if (ipv4_addr_legal) {
6645 						struct sockaddr_in *sin;
6646 
6647 						sin = &sctp_ifa->address.sin;
6648 						if (sin->sin_addr.s_addr == 0) {
6649 							/*
6650 							 * skip unspecified
6651 							 * addrs
6652 							 */
6653 							continue;
6654 						}
6655 						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
6656 						    &sin->sin_addr) != 0) {
6657 							continue;
6658 						}
6659 						if ((ipv4_local_scope == 0) &&
6660 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6661 							continue;
6662 						}
6663 						/* count this one */
6664 						count++;
6665 					} else {
6666 						continue;
6667 					}
6668 					break;
6669 #endif
6670 #ifdef INET6
6671 				case AF_INET6:
6672 					if (ipv6_addr_legal) {
6673 						struct sockaddr_in6 *sin6;
6674 
6675 						sin6 = &sctp_ifa->address.sin6;
6676 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6677 							continue;
6678 						}
6679 						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
6680 						    &sin6->sin6_addr) != 0) {
6681 							continue;
6682 						}
6683 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6684 							if (local_scope == 0)
6685 								continue;
6686 							if (sin6->sin6_scope_id == 0) {
6687 								if (sa6_recoverscope(sin6) != 0)
6688 									/*
6689 									/* bad link local address */
6698 							}
6699 						}
6700 						if ((site_scope == 0) &&
6701 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6702 							continue;
6703 						}
6704 						/* count this one */
6705 						count++;
6706 					}
6707 					break;
6708 #endif
6709 				default:
6710 					/* TSNH */
6711 					break;
6712 				}
6713 			}
6714 		}
6715 	} else {
6716 		/*
6717 		 * subset bound case
6718 		 */
6719 		struct sctp_laddr *laddr;
6720 
6721 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6722 		    sctp_nxt_addr) {
6723 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6724 				continue;
6725 			}
6726 			/* count this one */
6727 			count++;
6728 		}
6729 	}
6730 	SCTP_IPI_ADDR_RUNLOCK();
6731 	return (count);
6732 }
6733 
6734 #if defined(SCTP_LOCAL_TRACE_BUF)
6735 
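/*
 * Append one entry to the global sctp_log ring buffer.  The slot index is
 * claimed with atomic_cmpset_int() so concurrent tracers never pick the
 * same slot, and the index wraps back to the start of the buffer once it
 * reaches SCTP_MAX_LOGGING_SIZE.
 */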
6736 void
6737 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6738 {
6739 	uint32_t saveindex, newindex;
6740 
6741 	do {
6742 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6743 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6744 			newindex = 1;
6745 		} else {
6746 			newindex = saveindex + 1;
6747 		}
6748 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6749 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6750 		saveindex = 0;
6751 	}
6752 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6753 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6754 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6755 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6756 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6757 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6758 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6759 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6760 }
6761 
6762 #endif
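/*
 * Tunneling callback for SCTP over UDP: remember the UDP source port,
 * strip the UDP header out of the mbuf chain, fix up the IP/IPv6 length
 * field and hand the remaining packet to the normal SCTP input path.
 */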
6763 static void
6764 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
6765     const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
6766 {
6767 	struct ip *iph;
6768 #ifdef INET6
6769 	struct ip6_hdr *ip6;
6770 #endif
6771 	struct mbuf *sp, *last;
6772 	struct udphdr *uhdr;
6773 	uint16_t port;
6774 
6775 	if ((m->m_flags & M_PKTHDR) == 0) {
6776 		/* Can't handle one that is not a pkt hdr */
6777 		goto out;
6778 	}
6779 	/* Pull the src port */
6780 	iph = mtod(m, struct ip *);
6781 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6782 	port = uhdr->uh_sport;
6783 	/*
6784 	 * Split out the mbuf chain. Leave the IP header in m, place the
6785 	 * rest in the sp.
6786 	 */
6787 	sp = m_split(m, off, M_NOWAIT);
6788 	if (sp == NULL) {
6789 		/* Gak, drop packet, we can't do a split */
6790 		goto out;
6791 	}
6792 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
6793 		/* Gak, packet can't have an SCTP header in it - too small */
6794 		m_freem(sp);
6795 		goto out;
6796 	}
6797 	/* Now pull up the UDP header and SCTP header together */
6798 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
6799 	if (sp == NULL) {
6800 		/* Gak pullup failed */
6801 		goto out;
6802 	}
6803 	/* Trim out the UDP header */
6804 	m_adj(sp, sizeof(struct udphdr));
6805 
6806 	/* Now reconstruct the mbuf chain */
6807 	for (last = m; last->m_next; last = last->m_next);
6808 	last->m_next = sp;
6809 	m->m_pkthdr.len += sp->m_pkthdr.len;
6810 	/*
6811 	 * The CSUM_DATA_VALID flag indicates that the HW checked the UDP
6812 	 * checksum and it was valid. Since CSUM_DATA_VALID ==
6813 	 * CSUM_SCTP_VALID this would imply that the HW also verified the
6814 	 * SCTP checksum. Therefore, clear the bit.
6815 	 */
6816 	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
6817 	    "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
6818 	    m->m_pkthdr.len,
6819 	    if_name(m->m_pkthdr.rcvif),
6820 	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
6821 	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
6822 	iph = mtod(m, struct ip *);
6823 	switch (iph->ip_v) {
6824 #ifdef INET
6825 	case IPVERSION:
6826 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
6827 		sctp_input_with_port(m, off, port);
6828 		break;
6829 #endif
6830 #ifdef INET6
6831 	case IPV6_VERSION >> 4:
6832 		ip6 = mtod(m, struct ip6_hdr *);
6833 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
6834 		sctp6_input_with_port(&m, &off, port);
6835 		break;
6836 #endif
6837 	default:
6838 		goto out;
6839 		break;
6840 	}
6841 	return;
6842 out:
6843 	m_freem(m);
6844 }
6845 
6846 #ifdef INET
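/*
 * ICMP handler for SCTP-over-UDP traffic: look up the association from the
 * addresses embedded in the quoted packet, verify the UDP ports and the
 * verification tag (or, for packets carrying v_tag 0, the initiate tag of
 * a quoted INIT chunk), translate port unreachable to protocol unreachable
 * and pass the error and reported MTU to sctp_notify().
 */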
6847 static void
6848 sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
6849 {
6850 	struct ip *outer_ip, *inner_ip;
6851 	struct sctphdr *sh;
6852 	struct icmp *icmp;
6853 	struct udphdr *udp;
6854 	struct sctp_inpcb *inp;
6855 	struct sctp_tcb *stcb;
6856 	struct sctp_nets *net;
6857 	struct sctp_init_chunk *ch;
6858 	struct sockaddr_in src, dst;
6859 	uint8_t type, code;
6860 
6861 	inner_ip = (struct ip *)vip;
6862 	icmp = (struct icmp *)((caddr_t)inner_ip -
6863 	    (sizeof(struct icmp) - sizeof(struct ip)));
6864 	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
6865 	if (ntohs(outer_ip->ip_len) <
6866 	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
6867 		return;
6868 	}
6869 	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
6870 	sh = (struct sctphdr *)(udp + 1);
6871 	memset(&src, 0, sizeof(struct sockaddr_in));
6872 	src.sin_family = AF_INET;
6873 	src.sin_len = sizeof(struct sockaddr_in);
6874 	src.sin_port = sh->src_port;
6875 	src.sin_addr = inner_ip->ip_src;
6876 	memset(&dst, 0, sizeof(struct sockaddr_in));
6877 	dst.sin_family = AF_INET;
6878 	dst.sin_len = sizeof(struct sockaddr_in);
6879 	dst.sin_port = sh->dest_port;
6880 	dst.sin_addr = inner_ip->ip_dst;
6881 	/*
6882 	 * 'dst' holds the dest of the packet that failed to be sent. 'src'
6883 	 * holds our local endpoint address. Thus we reverse the dst and the
6884 	 * src in the lookup.
6885 	 */
6886 	inp = NULL;
6887 	net = NULL;
6888 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
6889 	    (struct sockaddr *)&src,
6890 	    &inp, &net, 1,
6891 	    SCTP_DEFAULT_VRFID);
6892 	if ((stcb != NULL) &&
6893 	    (net != NULL) &&
6894 	    (inp != NULL)) {
6895 		/* Check the UDP port numbers */
6896 		if ((udp->uh_dport != net->port) ||
6897 		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
6898 			SCTP_TCB_UNLOCK(stcb);
6899 			return;
6900 		}
6901 		/* Check the verification tag */
6902 		if (ntohl(sh->v_tag) != 0) {
6903 			/*
6904 			 * This must be the verification tag used for
6905 			 * sending out packets. We don't consider packets
6906 			 * reflecting the verification tag.
6907 			 */
6908 			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
6909 				SCTP_TCB_UNLOCK(stcb);
6910 				return;
6911 			}
6912 		} else {
6913 			if (ntohs(outer_ip->ip_len) >=
6914 			    sizeof(struct ip) +
6915 			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
6916 				/*
6917 				 * In this case we can check if we got an
6918 				 * INIT chunk and if the initiate tag
6919 				 * matches.
6920 				 */
6921 				ch = (struct sctp_init_chunk *)(sh + 1);
6922 				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
6923 				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
6924 					SCTP_TCB_UNLOCK(stcb);
6925 					return;
6926 				}
6927 			} else {
6928 				SCTP_TCB_UNLOCK(stcb);
6929 				return;
6930 			}
6931 		}
6932 		type = icmp->icmp_type;
6933 		code = icmp->icmp_code;
6934 		if ((type == ICMP_UNREACH) &&
6935 		    (code == ICMP_UNREACH_PORT)) {
6936 			code = ICMP_UNREACH_PROTOCOL;
6937 		}
6938 		sctp_notify(inp, stcb, net, type, code,
6939 		    ntohs(inner_ip->ip_len),
6940 		    (uint32_t)ntohs(icmp->icmp_nextmtu));
6941 	} else {
6942 		if ((stcb == NULL) && (inp != NULL)) {
6943 			/* reduce ref-count */
6944 			SCTP_INP_WLOCK(inp);
6945 			SCTP_INP_DECR_REF(inp);
6946 			SCTP_INP_WUNLOCK(inp);
6947 		}
6948 		if (stcb) {
6949 			SCTP_TCB_UNLOCK(stcb);
6950 		}
6951 	}
6952 	return;
6953 }
6954 #endif
6955 
6956 #ifdef INET6
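/*
 * IPv6 counterpart of the handler above.  The quoted UDP and SCTP headers
 * may not be contiguous in the mbuf, so they are pulled out with
 * m_copydata() before the usual UDP port and verification tag checks are
 * applied to the looked-up association.
 */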
6957 static void
6958 sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
6959 {
6960 	struct ip6ctlparam *ip6cp;
6961 	struct sctp_inpcb *inp;
6962 	struct sctp_tcb *stcb;
6963 	struct sctp_nets *net;
6964 	struct sctphdr sh;
6965 	struct udphdr udp;
6966 	struct sockaddr_in6 src, dst;
6967 	uint8_t type, code;
6968 
6969 	ip6cp = (struct ip6ctlparam *)d;
6970 	/*
6971 	 * XXX: We assume that when ip6c_ip6 is non-NULL, ip6c_m and ip6c_off are valid.
6972 	 */
6973 	if (ip6cp->ip6c_m == NULL) {
6974 		return;
6975 	}
6976 	/*
6977 	 * Check if we can safely examine the ports and the verification tag
6978 	 * of the SCTP common header.
6979 	 */
6980 	if (ip6cp->ip6c_m->m_pkthdr.len <
6981 	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
6982 		return;
6983 	}
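	/*
	 * Unlike the IPv4 handler, which can use pointers into a
	 * contiguous quoted packet, here the ICMPv6 payload still sits in
	 * an mbuf chain and may not be contiguous, so m_copydata() is
	 * used to pull out the UDP header and the needed parts of the
	 * SCTP common header.
	 */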
6984 	/* Copy out the UDP header. */
6985 	memset(&udp, 0, sizeof(struct udphdr));
6986 	m_copydata(ip6cp->ip6c_m,
6987 	    ip6cp->ip6c_off,
6988 	    sizeof(struct udphdr),
6989 	    (caddr_t)&udp);
6990 	/* Copy out the port numbers and the verification tag. */
6991 	memset(&sh, 0, sizeof(struct sctphdr));
6992 	m_copydata(ip6cp->ip6c_m,
6993 	    ip6cp->ip6c_off + sizeof(struct udphdr),
6994 	    sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
6995 	    (caddr_t)&sh);
6996 	memset(&src, 0, sizeof(struct sockaddr_in6));
6997 	src.sin6_family = AF_INET6;
6998 	src.sin6_len = sizeof(struct sockaddr_in6);
6999 	src.sin6_port = sh.src_port;
7000 	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
7001 	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7002 		return;
7003 	}
7004 	memset(&dst, 0, sizeof(struct sockaddr_in6));
7005 	dst.sin6_family = AF_INET6;
7006 	dst.sin6_len = sizeof(struct sockaddr_in6);
7007 	dst.sin6_port = sh.dest_port;
7008 	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
7009 	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
7010 		return;
7011 	}
7012 	inp = NULL;
7013 	net = NULL;
7014 	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
7015 	    (struct sockaddr *)&src,
7016 	    &inp, &net, 1, SCTP_DEFAULT_VRFID);
7017 	if ((stcb != NULL) &&
7018 	    (net != NULL) &&
7019 	    (inp != NULL)) {
7020 		/* Check the UDP port numbers */
7021 		if ((udp.uh_dport != net->port) ||
7022 		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
7023 			SCTP_TCB_UNLOCK(stcb);
7024 			return;
7025 		}
7026 		/* Check the verification tag */
7027 		if (ntohl(sh.v_tag) != 0) {
7028 			/*
7029 			 * This must be the verification tag used for
7030 			 * sending out packets. We don't consider packets
7031 			 * reflecting the verification tag.
7032 			 */
7033 			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
7034 				SCTP_TCB_UNLOCK(stcb);
7035 				return;
7036 			}
7037 		} else {
7038 			if (ip6cp->ip6c_m->m_pkthdr.len >=
7039 			    ip6cp->ip6c_off + sizeof(struct udphdr) +
7040 			    sizeof(struct sctphdr) +
7041 			    sizeof(struct sctp_chunkhdr) +
7042 			    offsetof(struct sctp_init, a_rwnd)) {
7043 				/*
7044 				 * In this case we can check if we got an
7045 				 * INIT chunk and if the initiate tag
7046 				 * matches.
7047 				 */
7048 				uint32_t initiate_tag;
7049 				uint8_t chunk_type;
7050 
7051 				m_copydata(ip6cp->ip6c_m,
7052 				    ip6cp->ip6c_off +
7053 				    sizeof(struct udphdr) +
7054 				    sizeof(struct sctphdr),
7055 				    sizeof(uint8_t),
7056 				    (caddr_t)&chunk_type);
7057 				m_copydata(ip6cp->ip6c_m,
7058 				    ip6cp->ip6c_off +
7059 				    sizeof(struct udphdr) +
7060 				    sizeof(struct sctphdr) +
7061 				    sizeof(struct sctp_chunkhdr),
7062 				    sizeof(uint32_t),
7063 				    (caddr_t)&initiate_tag);
7064 				if ((chunk_type != SCTP_INITIATION) ||
7065 				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
7066 					SCTP_TCB_UNLOCK(stcb);
7067 					return;
7068 				}
7069 			} else {
7070 				SCTP_TCB_UNLOCK(stcb);
7071 				return;
7072 			}
7073 		}
7074 		type = ip6cp->ip6c_icmp6->icmp6_type;
7075 		code = ip6cp->ip6c_icmp6->icmp6_code;
7076 		if ((type == ICMP6_DST_UNREACH) &&
7077 		    (code == ICMP6_DST_UNREACH_NOPORT)) {
7078 			type = ICMP6_PARAM_PROB;
7079 			code = ICMP6_PARAMPROB_NEXTHEADER;
7080 		}
7081 		sctp6_notify(inp, stcb, net, type, code,
7082 		    ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
7083 	} else {
7084 		if ((stcb == NULL) && (inp != NULL)) {
7085 			/* reduce inp's ref-count */
7086 			SCTP_INP_WLOCK(inp);
7087 			SCTP_INP_DECR_REF(inp);
7088 			SCTP_INP_WUNLOCK(inp);
7089 		}
7090 		if (stcb) {
7091 			SCTP_TCB_UNLOCK(stcb);
7092 		}
7093 	}
7094 }
7095 #endif
7096 
7097 void
7098 sctp_over_udp_stop(void)
7099 {
7100 	/*
7101 	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock()
7102 	 * for writing!
7103 	 */
7104 #ifdef INET
7105 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7106 		soclose(SCTP_BASE_INFO(udp4_tun_socket));
7107 		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
7108 	}
7109 #endif
7110 #ifdef INET6
7111 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7112 		soclose(SCTP_BASE_INFO(udp6_tun_socket));
7113 		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
7114 	}
7115 #endif
7116 }
7117 
7118 int
7119 sctp_over_udp_start(void)
7120 {
7121 	uint16_t port;
7122 	int ret;
7123 #ifdef INET
7124 	struct sockaddr_in sin;
7125 #endif
7126 #ifdef INET6
7127 	struct sockaddr_in6 sin6;
7128 #endif
7129 	/*
7130 	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock()
7131 	 * for writing!
7132 	 */
7133 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
7134 	if (ntohs(port) == 0) {
7135 		/* Must have a port set */
7136 		return (EINVAL);
7137 	}
7138 #ifdef INET
7139 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
7140 		/* Already running -- must stop first */
7141 		return (EALREADY);
7142 	}
7143 #endif
7144 #ifdef INET6
7145 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
7146 		/* Already running -- must stop first */
7147 		return (EALREADY);
7148 	}
7149 #endif
7150 #ifdef INET
7151 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
7152 	    SOCK_DGRAM, IPPROTO_UDP,
7153 	    curthread->td_ucred, curthread))) {
7154 		sctp_over_udp_stop();
7155 		return (ret);
7156 	}
7157 	/* Call the special UDP hook. */
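	/*
	 * udp_set_kernel_tunneling() attaches both the tunneled packet
	 * receive function and the ICMP error handler to the socket, so
	 * encapsulated SCTP packets and related ICMP errors arriving on
	 * this UDP port are demultiplexed back to SCTP inside the kernel.
	 */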
7158 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
7159 	    sctp_recv_udp_tunneled_packet,
7160 	    sctp_recv_icmp_tunneled_packet,
7161 	    NULL))) {
7162 		sctp_over_udp_stop();
7163 		return (ret);
7164 	}
7165 	/* Ok, we have a socket, bind it to the port. */
7166 	memset(&sin, 0, sizeof(struct sockaddr_in));
7167 	sin.sin_len = sizeof(struct sockaddr_in);
7168 	sin.sin_family = AF_INET;
7169 	sin.sin_port = htons(port);
7170 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
7171 	    (struct sockaddr *)&sin, curthread))) {
7172 		sctp_over_udp_stop();
7173 		return (ret);
7174 	}
7175 #endif
7176 #ifdef INET6
7177 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
7178 	    SOCK_DGRAM, IPPROTO_UDP,
7179 	    curthread->td_ucred, curthread))) {
7180 		sctp_over_udp_stop();
7181 		return (ret);
7182 	}
7183 	/* Call the special UDP hook. */
7184 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
7185 	    sctp_recv_udp_tunneled_packet,
7186 	    sctp_recv_icmp6_tunneled_packet,
7187 	    NULL))) {
7188 		sctp_over_udp_stop();
7189 		return (ret);
7190 	}
7191 	/* Ok, we have a socket, bind it to the port. */
7192 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
7193 	sin6.sin6_len = sizeof(struct sockaddr_in6);
7194 	sin6.sin6_family = AF_INET6;
7195 	sin6.sin6_port = htons(port);
7196 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
7197 	    (struct sockaddr *)&sin6, curthread))) {
7198 		sctp_over_udp_stop();
7199 		return (ret);
7200 	}
7201 #endif
7202 	return (0);
7203 }
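
/*
 * sctp_over_udp_start() and sctp_over_udp_stop() are driven by the
 * net.inet.sctp.udp_tunneling_port sysctl: its handler is expected to
 * stop any running tunneling sockets and, for a non-zero port, start
 * them again, holding the sysctl info lock for writing as required
 * above.  RFC 6951 registers UDP port 9899 for SCTP over UDP
 * encapsulation, e.g.:
 *
 *	sysctl net.inet.sctp.udp_tunneling_port=9899
 */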
7204 
7205 /*
7206  * sctp_min_mtu() returns the minimum of all non-zero arguments.
7207  * If all arguments are zero, zero is returned.
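 * For example, sctp_min_mtu(1500, 1280, 0) returns 1280 and
 * sctp_min_mtu(0, 0, 0) returns 0.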
7208  */
7209 uint32_t
7210 sctp_min_mtu(uint32_t mtu1, uint32_t mtu2, uint32_t mtu3)
7211 {
7212 	if (mtu1 > 0) {
7213 		if (mtu2 > 0) {
7214 			if (mtu3 > 0) {
7215 				return (min(mtu1, min(mtu2, mtu3)));
7216 			} else {
7217 				return (min(mtu1, mtu2));
7218 			}
7219 		} else {
7220 			if (mtu3 > 0) {
7221 				return (min(mtu1, mtu3));
7222 			} else {
7223 				return (mtu1);
7224 			}
7225 		}
7226 	} else {
7227 		if (mtu2 > 0) {
7228 			if (mtu3 > 0) {
7229 				return (min(mtu2, mtu3));
7230 			} else {
7231 				return (mtu2);
7232 			}
7233 		} else {
7234 			return (mtu3);
7235 		}
7236 	}
7237 }
7238 
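/*
 * The two helpers below store and retrieve a path MTU for a peer
 * address in the TCP host cache (tcp_hc_updatemtu()/tcp_hc_getmtu()),
 * keyed on the foreign address and the FIB number, so that path MTU
 * information learned by SCTP and TCP can be shared between the two
 * protocols.
 */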
7239 void
7240 sctp_hc_set_mtu(union sctp_sockstore *addr, uint16_t fibnum, uint32_t mtu)
7241 {
7242 	struct in_conninfo inc;
7243 
7244 	memset(&inc, 0, sizeof(struct in_conninfo));
7245 	inc.inc_fibnum = fibnum;
7246 	switch (addr->sa.sa_family) {
7247 #ifdef INET
7248 	case AF_INET:
7249 		inc.inc_faddr = addr->sin.sin_addr;
7250 		break;
7251 #endif
7252 #ifdef INET6
7253 	case AF_INET6:
7254 		inc.inc_flags |= INC_ISIPV6;
7255 		inc.inc6_faddr = addr->sin6.sin6_addr;
7256 		break;
7257 #endif
7258 	default:
7259 		return;
7260 	}
7261 	tcp_hc_updatemtu(&inc, (u_long)mtu);
7262 }
7263 
7264 uint32_t
7265 sctp_hc_get_mtu(union sctp_sockstore *addr, uint16_t fibnum)
7266 {
7267 	struct in_conninfo inc;
7268 
7269 	memset(&inc, 0, sizeof(struct in_conninfo));
7270 	inc.inc_fibnum = fibnum;
7271 	switch (addr->sa.sa_family) {
7272 #ifdef INET
7273 	case AF_INET:
7274 		inc.inc_faddr = addr->sin.sin_addr;
7275 		break;
7276 #endif
7277 #ifdef INET6
7278 	case AF_INET6:
7279 		inc.inc_flags |= INC_ISIPV6;
7280 		inc.inc6_faddr = addr->sin6.sin6_addr;
7281 		break;
7282 #endif
7283 	default:
7284 		return (0);
7285 	}
7286 	return ((uint32_t)tcp_hc_getmtu(&inc));
7287 }
7288